Posted to commits@hive.apache.org by we...@apache.org on 2016/10/19 00:35:18 UTC

[01/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Repository: hive
Updated Branches:
  refs/heads/hive-14535 b6571eaef -> 3f34134a6
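
Context for the golden-file changes below: HIVE-11394 extended EXPLAIN with an optional VECTORIZATION clause that prints plan-level and per-operator vectorization detail. This revert strips that output from the .q.out files, so the expected queries fall back from EXPLAIN VECTORIZATION [EXPRESSION] to plain EXPLAIN. A minimal sketch of the two forms, using the alltypesorc query from the first hunk; the SET line names the switch that appears in the removed enabledConditionsMet output:

    -- Plain EXPLAIN: the form the golden files revert to.
    EXPLAIN
    SELECT MIN(ctinyint) as c1,
           MAX(ctinyint),
           COUNT(ctinyint),
           COUNT(*)
    FROM   alltypesorc
    ORDER BY c1;

    -- Reverted form: additionally printed a PLAN VECTORIZATION section,
    -- per-operator *Vectorization blocks, and native/UDF-adaptor flags.
    SET hive.vectorized.execution.enabled=true;
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT MIN(ctinyint) as c1,
           MAX(ctinyint),
           COUNT(ctinyint),
           COUNT(*)
    FROM   alltypesorc
    ORDER BY c1;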


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index b297a7d..22fe7cd 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -1,7 +1,7 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Use ORDER BY clauses to generate 2 stages.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ctinyint) as c1,
        MAX(ctinyint),
        COUNT(ctinyint),
@@ -12,7 +12,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Use ORDER BY clauses to generate 2 stages.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ctinyint) as c1,
        MAX(ctinyint),
        COUNT(ctinyint),
@@ -20,10 +20,6 @@ SELECT MIN(ctinyint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -41,100 +37,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ctinyint), max(ctinyint), count(ctinyint), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 0) -> tinyint, VectorUDAFMaxLong(col 0) -> tinyint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 0) -> tinyint, VectorUDAFMaxLong(col 1) -> tinyint, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: tinyint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: tinyint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -166,20 +104,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -64	62	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -197,99 +131,41 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(ctinyint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 0) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -315,7 +191,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -39856
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(ctinyint) as c1,
   variance(ctinyint),
@@ -328,7 +204,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(ctinyint) as c1,
   variance(ctinyint),
@@ -341,10 +217,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -376,20 +248,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 636 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:tinyint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:tinyint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -403,13 +262,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -458,7 +310,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -4.344925324321378	1158.3003004768184	1158.3003004768184	1158.4265870337827	34.033811136527426	34.033811136527426	34.033811136527426	34.03566639620536
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(cbigint) as c1,
        MAX(cbigint),
        COUNT(cbigint),
@@ -466,7 +318,7 @@ SELECT MIN(cbigint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(cbigint) as c1,
        MAX(cbigint),
        COUNT(cbigint),
@@ -474,10 +326,6 @@ SELECT MIN(cbigint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -495,100 +343,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: cbigint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(cbigint), max(cbigint), count(cbigint), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 3) -> bigint, VectorUDAFMaxLong(col 3) -> bigint, VectorUDAFCount(col 3) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 0) -> bigint, VectorUDAFMaxLong(col 1) -> bigint, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -620,20 +410,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -2147311592	2145498388	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -651,99 +437,41 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: cbigint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(cbigint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -769,7 +497,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -1698460028409
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(cbigint) as c1,
   variance(cbigint),
@@ -782,7 +510,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(cbigint) as c1,
   variance(cbigint),
@@ -795,10 +523,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -830,20 +554,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:bigint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:bigint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -857,13 +568,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -912,7 +616,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -1.8515862077935246E8	2.07689300543081907E18	2.07689300543081907E18	2.07711944383088768E18	1.441142951074188E9	1.441142951074188E9	1.441142951074188E9	1.4412215110214279E9
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(cfloat) as c1,
        MAX(cfloat),
        COUNT(cfloat),
@@ -920,7 +624,7 @@ SELECT MIN(cfloat) as c1,
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(cfloat) as c1,
        MAX(cfloat),
        COUNT(cfloat),
@@ -928,10 +632,6 @@ SELECT MIN(cfloat) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -949,100 +649,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cfloat (type: float)
                     outputColumnNames: cfloat
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(cfloat), max(cfloat), count(cfloat), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinDouble(col 4) -> float, VectorUDAFMaxDouble(col 4) -> float, VectorUDAFCount(col 4) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinDouble(col 0) -> float, VectorUDAFMaxDouble(col 1) -> float, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: float)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: float), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1074,20 +716,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -64.0	79.553	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1105,99 +743,41 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cfloat (type: float)
                     outputColumnNames: cfloat
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(cfloat)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumDouble(col 4) -> double
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumDouble(col 0) -> double
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1223,7 +803,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -39479.635992884636
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(cfloat) as c1,
   variance(cfloat),
@@ -1236,7 +816,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(cfloat) as c1,
   variance(cfloat),
@@ -1249,10 +829,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1284,20 +860,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 636 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:float>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:float> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -1311,13 +874,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -1367,7 +923,7 @@ POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -4.303895780321011	1163.8972588604984	1163.8972588604984	1164.0241556397025	34.115938487171924	34.115938487171924	34.115938487171924	34.11779822379666
 WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),
@@ -1394,7 +950,7 @@ WHERE  (((cstring2 LIKE '%b%')
             AND ((cboolean2 = 1)
                  AND (3569 = ctinyint))))
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),
@@ -1421,10 +977,6 @@ WHERE  (((cstring2 LIKE '%b%')
             AND ((cboolean2 = 1)
                  AND (3569 = ctinyint))))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1441,33 +993,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7, pattern %b%) -> boolean, FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 12)(children: CastLongToDecimal(col 2) -> 12:decimal(13,3)) -> boolean, FilterDoubleColLessDoubleColumn(col 13, col 5)(children: CastLongToDouble(col 3) -> 13:double) -> boolean, FilterExprAndExpr(children: FilterLongColGreaterEqualLongColumn(col 0, col 1)(children: col 0) -> boolean, FilterLongColEqualLongScalar(col 11, val 1) -> boolean, FilterLongScalarEqualLongColumn(val 3569, col 0)(children: col 0) -> boolean) -> boolean) -> boolean
                     predicate: ((cstring2 like '%b%') or (79.553 <> CAST( cint AS decimal(13,3))) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569 = UDFToInteger(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)
                       outputColumnNames: cbigint, cfloat, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3, 4, 0]
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFAvgLong(col 3) -> struct<count:bigint,sum:double>, VectorUDAFStdPopLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarSampLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint, VectorUDAFSumDouble(col 4) -> double, VectorUDAFMinLong(col 0) -> tinyint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(col 3) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarSampLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE
@@ -1476,20 +1010,7 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: struct<count:bigint,sum:double,input:bigint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: bigint), _col4 (type: double), _col5 (type: tinyint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:bigint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), stddev_pop(VALUE._col1), var_samp(VALUE._col2), count(VALUE._col3), sum(VALUE._col4), min(VALUE._col5)


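The hunk above shows why the reducer in this plan was never vectorized even before the revert: avg(), stddev_pop() and var_samp() carry STRUCT-typed intermediates (count/sum, count/sum/variance), and the vectorized GROUP BY of this era could only emit PRIMITIVE output, as the removed vectorOutputConditionsNotMet lines spell out. A minimal sketch that reproduces the plan, assuming the standard alltypesorc ORC test table and the EXPLAIN VECTORIZATION syntax added (and reverted here) by HIVE-11394:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;

    EXPLAIN VECTORIZATION EXPRESSION
    SELECT AVG(cbigint),         -- struct<count:bigint,sum:double,input:bigint> intermediate
           STDDEV_POP(cbigint),  -- struct<count:bigint,sum:double,variance:double> intermediate
           VAR_SAMP(cbigint),
           COUNT(*),
           SUM(cfloat),
           MIN(ctinyint)
    FROM   alltypesorc
    WHERE  cstring2 LIKE '%b%';  -- simplified from the test's full OR predicate

The map side still runs vectorized (the hash-mode GROUP BY tolerates the struct internally), but the merge in Reducer 2 falls back to row mode, which is what the removed "vectorized: false" and notVectorizedReason lines recorded.
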
[20/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
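
The excerpt that follows strips the same vectorization detail from the null-safe join tests. The <=> operator matches NULL to NULL, unlike =, so these plans carry nullSafes: [true] and the removed Map Join Vectorization lines list "No nullsafe IS false" among nativeConditionsNotMet; the join still vectorizes, but through the row-by-row VectorMapJoinOperator rather than a native vectorized map join. A minimal sketch of the semantics, assuming the two-column myinput1(key int, value int) table these tests use:

    -- = drops rows where either side is NULL; <=> treats NULL as a matchable value.
    SELECT * FROM myinput1 a JOIN myinput1 b ON a.key <=> b.value;

    -- The same null-safe predicate spelled out with plain equality:
    SELECT * FROM myinput1 a JOIN myinput1 b
    ON (a.key = b.value) OR (a.key IS NULL AND b.value IS NULL);
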
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
index cf90430..06e30d8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
@@ -49,15 +49,11 @@ POSTHOOK: Output: default@myinput1
 POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -75,20 +71,12 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
                     keys:
                       0 key (type: int)
                       1 value (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false, No nullsafe IS false
                     nullSafes: [true]
                     outputColumnNames: _col0, _col1, _col5, _col6
                     input vertices:
@@ -97,16 +85,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3]
                       Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -114,42 +95,19 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int)
                     sort order: +
                     Map-reduce partition columns: value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -176,14 +134,10 @@ NULL	35	NULL	NULL
 NULL	NULL	10	NULL
 NULL	NULL	48	NULL
 NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -201,14 +155,7 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
@@ -219,11 +166,6 @@ STAGE PLANS:
                         0 key (type: int)
                         1 value (type: int)
                         2 key (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false, One MapJoin Condition IS false
                       outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                       input vertices:
                         1 Map 2
@@ -232,16 +174,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -249,84 +184,38 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: int)
                       sort order: +
                       Map-reduce partition columns: value (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -344,14 +233,10 @@ POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 10	NULL	NULL	10	10	NULL
 100	100	100	100	100	100
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -369,9 +254,6 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -380,11 +262,6 @@ STAGE PLANS:
                       0 key (type: int)
                       1 value (type: int)
                       2 key (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false, One MapJoin Condition IS false, No nullsafe IS false
                     nullSafes: [true]
                     outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                     input vertices:
@@ -394,16 +271,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                       Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -411,70 +281,32 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int)
                     sort order: +
                     Map-reduce partition columns: value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: key (type: int)
                     sort order: +
                     Map-reduce partition columns: key (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: value (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -519,14 +351,10 @@ NULL	NULL	48	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	10
 NULL	NULL	NULL	NULL	NULL	35
 NULL	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -544,14 +372,7 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
@@ -562,11 +383,6 @@ STAGE PLANS:
                         0 key (type: int), value (type: int)
                         1 value (type: int), key (type: int)
                         2 key (type: int), value (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false, One MapJoin Condition IS false, No nullsafe IS false
                       nullSafes: [true, false]
                       outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                       input vertices:
@@ -576,16 +392,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -593,82 +402,36 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: int), key (type: int)
                       sort order: ++
                       Map-reduce partition columns: value (type: int), key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkMultiKeyOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int), value (type: int)
                       sort order: ++
                       Map-reduce partition columns: key (type: int), value (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkMultiKeyOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -686,14 +449,10 @@ POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 100	100	100	100	100	100
 NULL	10	10	NULL	NULL	10
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -711,9 +470,6 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -722,11 +478,6 @@ STAGE PLANS:
                       0 key (type: int), value (type: int)
                       1 value (type: int), key (type: int)
                       2 key (type: int), value (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false, One MapJoin Condition IS false, No nullsafe IS false
                     nullSafes: [true, true]
                     outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                     input vertices:
@@ -736,16 +487,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                       Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -753,68 +497,30 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int), key (type: int)
                     sort order: ++
                     Map-reduce partition columns: value (type: int), key (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkMultiKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: key (type: int), value (type: int)
                     sort order: ++
                     Map-reduce partition columns: key (type: int), value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkMultiKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -938,15 +644,11 @@ NULL	NULL	10	NULL
 NULL	NULL	48	NULL
 NULL	NULL	NULL	NULL
 PREHOOK: query: -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -964,20 +666,12 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
                     keys:
                       0 key (type: int)
                       1 value (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: No nullsafe IS false
                     nullSafes: [true]
                     outputColumnNames: _col0, _col1, _col5, _col6
                     input vertices:
@@ -986,16 +680,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3]
                       Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1003,42 +690,19 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int)
                     sort order: +
                     Map-reduce partition columns: value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -1065,14 +729,10 @@ NULL	35	NULL	NULL
 NULL	NULL	10	NULL
 NULL	NULL	48	NULL
 NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1090,14 +750,7 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
@@ -1108,11 +761,6 @@ STAGE PLANS:
                         0 key (type: int)
                         1 value (type: int)
                         2 key (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: One MapJoin Condition IS false
                       outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                       input vertices:
                         1 Map 2
@@ -1121,16 +769,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1138,84 +779,38 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: int)
                       sort order: +
                       Map-reduce partition columns: value (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -1233,14 +828,10 @@ POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 10	NULL	NULL	10	10	NULL
 100	100	100	100	100	100
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1258,9 +849,6 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -1269,11 +857,6 @@ STAGE PLANS:
                       0 key (type: int)
                       1 value (type: int)
                       2 key (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: One MapJoin Condition IS false, No nullsafe IS false
                     nullSafes: [true]
                     outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                     input vertices:
@@ -1283,16 +866,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                       Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1300,70 +876,32 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int)
                     sort order: +
                     Map-reduce partition columns: value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: key (type: int)
                     sort order: +
                     Map-reduce partition columns: key (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkLongOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: value (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -1408,14 +946,10 @@ NULL	NULL	48	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	10
 NULL	NULL	NULL	NULL	NULL	35
 NULL	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1433,14 +967,7 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
@@ -1451,11 +978,6 @@ STAGE PLANS:
                         0 key (type: int), value (type: int)
                         1 value (type: int), key (type: int)
                         2 key (type: int), value (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: One MapJoin Condition IS false, No nullsafe IS false
                       nullSafes: [true, false]
                       outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                       input vertices:
@@ -1465,16 +987,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1482,82 +997,36 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: int), key (type: int)
                       sort order: ++
                       Map-reduce partition columns: value (type: int), key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkMultiKeyOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: value is not null (type: boolean)
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int), value (type: int)
                       sort order: ++
                       Map-reduce partition columns: key (type: int), value (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkMultiKeyOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -1575,14 +1044,10 @@ POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 100	100	100	100	100	100
 NULL	10	10	NULL	NULL	10
-PREHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1600,9 +1065,6 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -1611,11 +1073,6 @@ STAGE PLANS:
                       0 key (type: int), value (type: int)
                       1 value (type: int), key (type: int)
                       2 key (type: int), value (type: int)
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: One MapJoin Condition IS false, No nullsafe IS false
                     nullSafes: [true, true]
                     outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
                     input vertices:
@@ -1625,16 +1082,9 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                       Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1642,68 +1092,30 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: value (type: int), key (type: int)
                     sort order: ++
                     Map-reduce partition columns: value (type: int), key (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkMultiKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: c
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Reduce Output Operator
                     key expressions: key (type: int), value (type: int)
                     sort order: ++
                     Map-reduce partition columns: key (type: int), value (type: int)
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkMultiKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
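
For readers tracing these plans: the `nullSafes: [true, true]` entries above record which join keys use Hive's null-safe equality operator `<=>`. Unlike plain `=`, `<=>` treats two NULL keys as equal, which is why result rows such as `NULL	10	10	NULL	NULL	10` survive these joins. A plain-Java illustration of the semantics (demo code only, not Hive internals; the class name is made up):

    import java.util.Objects;

    public class NullSafeEqDemo {
      // Hive's `=` yields NULL (filtered as false) when either side is NULL;
      // `<=>` instead compares NULLs as equal, like Objects.equals does.
      static boolean nullSafeEq(Integer a, Integer b) {
        return Objects.equals(a, b);
      }

      public static void main(String[] args) {
        System.out.println(nullSafeEq(null, null)); // true:  NULL <=> NULL
        System.out.println(nullSafeEq(null, 10));   // false: NULL <=> 10
        System.out.println(nullSafeEq(100, 100));   // true:  100 <=> 100
      }
    }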

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
index c4bbeff..6479ec5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
@@ -103,7 +103,7 @@ scratch.t	scratch.si	scratch.i	scratch.b	scratch.f	scratch.d	scratch.dc
 PREHOOK: query: --
 -- Projection LongCol<Compare>LongScalar
 --
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, (t < 0) as compare1, (si <= 0) as compare2, (i = 0) as compare3 from vectortab2k_orc
         order by t, si, i) as q
@@ -111,16 +111,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: --
 -- Projection LongCol<Compare>LongScalar
 --
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, (t < 0) as compare1, (si <= 0) as compare2, (i = 0) as compare3 from vectortab2k_orc
         order by t, si, i) as q
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -138,45 +134,19 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2k_orc
                   Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                   Select Operator
                     expressions: t (type: tinyint), si (type: smallint), i (type: int), (t < 0) (type: boolean), (si <= 0) (type: boolean), (i = 0) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 7, 8, 9]
-                        selectExpressions: LongColLessLongScalar(col 0, val 0) -> 7:long, LongColLessEqualLongScalar(col 1, val 0) -> 8:long, LongColEqualLongScalar(col 2, val 0) -> 9:long
                     Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Select expression for SELECT operator: UDF GenericUDFHash(Column[KEY.reducesinkkey0], Column[KEY.reducesinkkey1], Column[KEY.reducesinkkey2], Column[VALUE._col0], Column[VALUE._col1], Column[VALUE._col2]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: hash(KEY.reducesinkkey0,KEY.reducesinkkey1,KEY.reducesinkkey2,VALUE._col0,VALUE._col1,VALUE._col2) (type: int)
@@ -215,21 +185,17 @@ POSTHOOK: Input: default@vectortab2k_orc
 #### A masked pattern was here ####
 c0
 -3601806268
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, b, (t > 0) as compare1, (si >= 0) as compare2, (i != 0) as compare3, (b > 0) as compare4 from vectortab2k_orc
         order by t, si, i, b) as q
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, b, (t > 0) as compare1, (si >= 0) as compare2, (i != 0) as compare3, (b > 0) as compare4 from vectortab2k_orc
         order by t, si, i, b) as q
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -247,45 +213,19 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2k_orc
                   Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                   Select Operator
                     expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), (t > 0) (type: boolean), (si >= 0) (type: boolean), (i <> 0) (type: boolean), (b > 0) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 7, 8, 9, 10]
-                        selectExpressions: LongColGreaterLongScalar(col 0, val 0) -> 7:long, LongColGreaterEqualLongScalar(col 1, val 0) -> 8:long, LongColNotEqualLongScalar(col 2, val 0) -> 9:long, LongColGreaterLongScalar(col 3, val 0) -> 10:long
                     Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint)
                       sort order: ++++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Select expression for SELECT operator: UDF GenericUDFHash(Column[KEY.reducesinkkey0], Column[KEY.reducesinkkey1], Column[KEY.reducesinkkey2], Column[KEY.reducesinkkey3], Column[VALUE._col0], Column[VALUE._col1], Column[VALUE._col2], Column[VALUE._col3]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: hash(KEY.reducesinkkey0,KEY.reducesinkkey1,KEY.reducesinkkey2,KEY.reducesinkkey3,VALUE._col0,VALUE._col1,VALUE._col2,VALUE._col3) (type: int)
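
A side note on the `selectExpressions` lines deleted above (e.g. `LongColLessLongScalar(col 0, val 0) -> 7:long`): each names a generated kernel that evaluates one comparison across an entire column vector, writing 0/1 longs into a scratch output column. A rough sketch of that fast path under simplified assumptions (hypothetical class name; the real generated kernels also handle null flags, `isRepeating`, and the `selected` index array):

    public class LongColLessLongScalarSketch {
      // Dense fast path only: evaluate col[i] < scalar for a batch of n rows,
      // encoding the boolean result as 1/0 longs in the output column.
      static void evaluate(long[] col, long scalar, long[] out, int n) {
        for (int i = 0; i < n; i++) {
          out[i] = (col[i] < scalar) ? 1L : 0L;
        }
      }

      public static void main(String[] args) {
        long[] t = {-3, 0, 5};
        long[] out = new long[t.length];
        evaluate(t, 0L, out, t.length);
        System.out.println(java.util.Arrays.toString(out)); // [1, 0, 0]
      }
    }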


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupColumn.txt
index 9114932..a72b882 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupColumn.txt
@@ -477,11 +477,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(
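
The same edit repeats across the expression-template hunks above and below: each drops the `vectorExpressionParameters()` override that HIVE-11394 had added so EXPLAIN VECTORIZATION could print an operator's column and scalar arguments. A minimal sketch of the pattern being reverted (stand-in class names, not the actual generated classes; the stray `+ +` visible in several removed lines is just a harmless unary plus):

    // Stand-in types for illustration only.
    abstract class VectorExpressionSketch {
      // Hook the enhanced EXPLAIN called to describe an expression's arguments.
      public String vectorExpressionParameters() {
        return null; // default: nothing extra to display
      }
    }

    class ColCompareColSketch extends VectorExpressionSketch {
      private final int colNum1;
      private final int colNum2;

      ColCompareColSketch(int colNum1, int colNum2) {
        this.colNum1 = colNum1;
        this.colNum2 = colNum2;
      }

      @Override
      public String vectorExpressionParameters() {
        return "col " + colNum1 + ", col " + colNum2; // e.g. "col 0, col 2"
      }
    }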

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
index b56d451..8b1c366 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -154,9 +152,4 @@ public abstract class <ClassName> extends VectorExpression {
     this.value = value;
   }
 
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + new String(value, StandardCharsets.UTF_8);
-  }
-
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
index 4fb5035..930069c 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -157,10 +155,4 @@ public abstract class <ClassName> extends VectorExpression {
   public void setValue(byte[] value) {
     this.value = value;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "val " + new String(value, StandardCharsets.UTF_8) + ", col " + + colNum;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
index 7863b16..4298d79 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
@@ -154,11 +154,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", left " + leftValue.toString() + ", right " + rightValue.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
index 8873826..d10be96 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
@@ -167,11 +167,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
index 8583eee..31c3f6b 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
@@ -150,11 +150,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
index eeb73c9..31dce1c 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
@@ -436,11 +436,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
index 23790a5..bab8508 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
@@ -147,11 +147,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
index 0e10779..5e418de 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
@@ -43,11 +43,6 @@ public class <ClassName> extends <BaseClassName> {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
index 5a6def3..ff5d11e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
@@ -149,11 +149,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringColumnBetween.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringColumnBetween.txt
index 781c9b8..94a174d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringColumnBetween.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.common.type.<TruncStringHiveType>;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
@@ -180,13 +178,7 @@ public class <ClassName> extends VectorExpression {
   public void setRight(byte[] value) {
     this.right = value;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-     return "col " + colNum + ", left " + new String(left, StandardCharsets.UTF_8) +
-         ", right " + new String(right, StandardCharsets.UTF_8);
-  }
-
+  
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt
index 9f4bb75..3a75a26 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IfExprColumnScalar.txt
@@ -160,11 +160,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col " + arg2Column + ", val "+ arg3Scalar;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt
index 487d894..648b776 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarColumn.txt
@@ -162,11 +162,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt
index 5651d15..def9863 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IfExprScalarScalar.txt
@@ -147,11 +147,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", val "+ arg3Scalar;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateColumn.txt
index 49a1950..8e3a419 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateColumn.txt
@@ -180,11 +180,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateScalar.txt
index 283352d..ad65d52 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticDateScalar.txt
@@ -141,11 +141,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampColumn.txt
index 9eba829..858c3d7 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampColumn.txt
@@ -170,11 +170,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampScalar.txt
index 9a06822..66fffd2 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthColumnArithmeticTimestampScalar.txt
@@ -139,11 +139,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticDateColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticDateColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticDateColumn.txt
index a5d9877..ddde913 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticDateColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticDateColumn.txt
@@ -155,11 +155,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticTimestampColumn.txt
index 9a0d397..cbb7021 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/IntervalYearMonthScalarArithmeticTimestampColumn.txt
@@ -143,11 +143,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampColumn.txt
index cff2deb..9ccfaac 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampColumn.txt
@@ -139,11 +139,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampScalar.txt
index 8308a30..c7d8c65 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleColumnCompareTimestampScalar.txt
@@ -129,10 +129,6 @@ public class <ClassName> extends VectorExpression {
     return "long";
   }
 
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleScalarCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleScalarCompareTimestampColumn.txt
index 6aa30e4..d47bc10 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/LongDoubleScalarCompareTimestampColumn.txt
@@ -129,11 +129,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumn.txt
index 8473599..4fcbdc0 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumn.txt
@@ -146,11 +146,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt
index d3fd9bd..ea55bec 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnDecimal.txt
@@ -129,11 +129,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnWithConvert.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnWithConvert.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnWithConvert.txt
new file mode 100644
index 0000000..91887c8
--- /dev/null
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarArithmeticColumnWithConvert.txt
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
+
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+
+
+/*
+ * Because of the templatized nature of the code, either or both
+ * of these ColumnVector imports may be needed. Listing both of them
+ * rather than using ....vectorization.*;
+ */
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
+
+/**
+ * Generated from template ScalarArithmeticColumnWithConvert.txt.
+ * Implements a vectorized arithmetic operator with a scalar on the left and a
+ * column vector on the right. The result is output to an output column vector.
+ */
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private int colNum;
+  private <VectorOperandType1> value;
+  private int outputColumn;
+
+  public <ClassName>(<VectorOperandType1> value, int colNum, int outputColumn) {
+    this.colNum = colNum;
+    this.value = <TypeConversion1>(value);
+    this.outputColumn = outputColumn;
+  }
+
+  public <ClassName>() {
+  }
+
+  /**
+   * Evaluates the scalar-column operation in vectorized fashion.
+   *
+   * @param batch a package of rows with each column stored in a vector
+   */
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+
+    if (childExpressions != null) {
+      super.evaluateChildren(batch);
+    }
+
+    <InputColumnVectorType> inputColVector = (<InputColumnVectorType>) batch.cols[colNum];
+    <OutputColumnVectorType> outputColVector = (<OutputColumnVectorType>) batch.cols[outputColumn];
+    int[] sel = batch.selected;
+    boolean[] inputIsNull = inputColVector.isNull;
+    boolean[] outputIsNull = outputColVector.isNull;
+    outputColVector.noNulls = inputColVector.noNulls;
+    outputColVector.isRepeating = inputColVector.isRepeating;
+    int n = batch.size;
+    <VectorOperandType2>[] vector = inputColVector.vector;
+    <VectorReturnType>[] outputVector = outputColVector.vector;
+    
+    // return immediately if batch is empty
+    if (n == 0) {
+      return;
+    }
+
+    if (inputColVector.isRepeating) {
+      outputVector[0] = value <OperatorSymbol> <TypeConversion2>(vector[0]);
+      
+      // Even if there are no nulls, we always copy over entry 0. Simplifies code.
+      outputIsNull[0] = inputIsNull[0]; 
+    } else if (inputColVector.noNulls) {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = value <OperatorSymbol> <TypeConversion2>(vector[i]);
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = value <OperatorSymbol> <TypeConversion2>(vector[i]);
+        }
+      }
+    } else {                         /* there are nulls */ 
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = value <OperatorSymbol> <TypeConversion2>(vector[i]);
+          outputIsNull[i] = inputIsNull[i];
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = value <OperatorSymbol> <TypeConversion2>(vector[i]);
+        }
+        System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
+      }
+    }
+    
+    NullUtil.setNullOutputEntriesColScalar(outputColVector, batch.selectedInUse, sel, n);
+  }
+
+  @Override
+  public int getOutputColumn() {
+    return outputColumn;
+  }
+  
+  @Override
+  public String getOutputType() {
+    return "<VectorReturnType>";
+  }
+  
+  public int getColNum() {
+    return colNum;
+  }
+
+  public void setColNum(int colNum) {
+    this.colNum = colNum;
+  }
+
+  public <VectorOperandType1> getValue() {
+    return value;
+  }
+
+  public void setValue(<VectorOperandType1> value) {
+    this.value = value;
+  }
+
+  public void setOutputColumn(int outputColumn) {
+    this.outputColumn = outputColumn;
+  }
+
+  @Override
+  public VectorExpressionDescriptor.Descriptor getDescriptor() {
+    return (new VectorExpressionDescriptor.Builder())
+        .setMode(
+            VectorExpressionDescriptor.Mode.PROJECTION)
+        .setNumArguments(2)
+        .setArgumentTypes(
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+        .setInputExpressionTypes(
+            VectorExpressionDescriptor.InputExpressionType.SCALAR,
+            VectorExpressionDescriptor.InputExpressionType.COLUMN).build();
+  }
+}
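
Note on the template above: placeholders such as <ClassName>, <VectorOperandType1>, <OperatorSymbol>, and <TypeConversion2> are substituted at build time, producing one concrete class per type/operator combination. A minimal, self-contained sketch of what the core evaluate() loops expand to, assuming a hypothetical double-scalar-plus-long-column instantiation (the class name and the bare arrays standing in for ColumnVector fields are illustrative assumptions, not generated code):

public class DoubleScalarAddLongColumnSketch {
  private final double value;   // the <VectorOperandType1> scalar operand

  public DoubleScalarAddLongColumnSketch(double value) {
    this.value = value;
  }

  // Mirrors the template's noNulls branch: "value <OperatorSymbol>
  // <TypeConversion2>(vector[i])" becomes "value + (double) vector[i]",
  // applied over the selected indices or over the first n entries.
  public void evaluate(long[] vector, double[] outputVector,
      int[] sel, boolean selectedInUse, int n) {
    if (selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputVector[i] = value + (double) vector[i];
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputVector[i] = value + (double) vector[i];
      }
    }
  }
}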

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt
index 6f9e2e2..e6e59f5 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarCompareColumn.txt
@@ -149,11 +149,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumn.txt
index 8e6e8a9..f8a8457 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumn.txt
@@ -162,11 +162,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt
index 1014978..c8a5d17 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ScalarDivideColumnDecimal.txt
@@ -131,11 +131,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupColumn.txt
index 747f707..e881037 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupColumn.txt
@@ -493,11 +493,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
index d9530d6..92bf27a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -157,8 +155,4 @@ public abstract class <ClassName> extends VectorExpression {
     this.outputColumn = outputColumn;
   }
 
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + new String(value, StandardCharsets.UTF_8);
-  }
 }
\ No newline at end of file
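
The StandardCharsets import goes away with the method because the removed display helper was its only user. A tiny hedged sketch of the rendering that helper performed on the byte[] scalar (the literal values here are illustrative only):

import java.nio.charset.StandardCharsets;

public class BytesScalarRenderSketch {
  public static void main(String[] args) {
    byte[] value = "abc".getBytes(StandardCharsets.UTF_8);
    // Render the UTF-8 bytes the way the removed helper did.
    System.out.println("col 1, val " + new String(value, StandardCharsets.UTF_8));
  }
}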

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
index 8e36fc0..8a92f54 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.<BaseClassName>;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.common.type.<TruncStringHiveType>;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
@@ -46,11 +44,6 @@ public class <ClassName> extends <BaseClassName> {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + new String(value, StandardCharsets.UTF_8);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
index 5eed703..238dc93 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -156,9 +154,4 @@ public abstract class <ClassName> extends VectorExpression {
   public void setOutputColumn(int outputColumn) {
     this.outputColumn = outputColumn;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "val " + new String(value, StandardCharsets.UTF_8) + ", col " + + colNum;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateColumn.txt
index 7aeff81..27e083d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateColumn.txt
@@ -171,11 +171,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalar.txt
index f8cb880..8b91a4a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalar.txt
@@ -132,11 +132,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthColumn.txt
index 989e2f5..4ac2174 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthColumn.txt
@@ -170,11 +170,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthScalar.txt
index a90b1b2..9382aca 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticIntervalYearMonthScalar.txt
@@ -128,11 +128,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampColumn.txt
index ad43cac..5eaa450 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampColumn.txt
@@ -161,11 +161,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampScalar.txt
index 32b49a3..c6c872f 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticTimestampScalar.txt
@@ -130,11 +130,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleColumn.txt
index 7267148..0fc402d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleColumn.txt
@@ -138,11 +138,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleScalar.txt
index 2be05f3..e0ae206 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareLongDoubleScalar.txt
@@ -129,11 +129,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampColumn.txt
index 2710fa4..f9fc425 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampColumn.txt
@@ -143,11 +143,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampScalar.txt
index 32647f2..90701ec 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnCompareTimestampScalar.txt
@@ -133,11 +133,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticDateColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticDateColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticDateColumn.txt
index dea4db2..f958be8 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticDateColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticDateColumn.txt
@@ -153,11 +153,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticIntervalYearMonthColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticIntervalYearMonthColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticIntervalYearMonthColumn.txt
index e82b9e2..585027a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticIntervalYearMonthColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticIntervalYearMonthColumn.txt
@@ -152,11 +152,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticTimestampColumn.txt
index 0d8a26b..996c86a 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarArithmeticTimestampColumn.txt
@@ -143,11 +143,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarCompareTimestampColumn.txt
index ec0a395..6506c93 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TimestampScalarCompareTimestampColumn.txt
@@ -135,11 +135,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
index 26da73a..a9a3b6d 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.<BaseClassName>;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.common.type.<TruncStringHiveType>;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
index 4393c3b..d153fd6 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
@@ -85,12 +85,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
     
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private Object[] partialResult;
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
index 7468c2f..46d66bd 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
@@ -77,12 +77,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
     
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     private transient VectorExpressionWriter resultWriter;
     
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
index 6b91fc2..9a48171 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
@@ -83,12 +83,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     private transient VectorExpressionWriter resultWriter;
 
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
index 749e97e..3cdf7e2 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
@@ -81,12 +81,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     private transient VectorExpressionWriter resultWriter;
 
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
index 9dfc147..cdce457 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
@@ -93,12 +93,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private Text result;
 
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
index 32ecb34..7e34965 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
@@ -83,12 +83,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     private transient VectorExpressionWriter resultWriter;
 
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
index bd0f14d..cc7e54d 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
@@ -78,12 +78,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
     
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private final <OutputType> result;
     
     public <ClassName>(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
index dc9d4b1..c6c9c52 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
@@ -84,12 +84,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
index 01062a9..8fc94ba 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
@@ -111,12 +111,6 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index 74cec3e..b8a4693 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.exec;
 
 import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
-import org.apache.commons.lang3.tuple.ImmutablePair;
-
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.Serializable;
@@ -37,68 +35,30 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Stack;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
 import org.apache.hadoop.hive.common.jsonexplain.JsonParserFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.Validator.StringSet;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
-import org.apache.hadoop.hive.ql.exec.tez.TezTask;
-import org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
-import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
-import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger;
-import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
-import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
-import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.MapWork;
-import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TezWork;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.AnnotationUtils;
@@ -197,54 +157,6 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
     return outJSONObject;
   }
 
-  private static String trueCondNameVectorizationEnabled =
-      HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname + " IS true";
-  private static String falseCondNameVectorizationEnabled =
-      HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname + " IS false";
-
-  private ImmutablePair<Boolean, JSONObject> outputPlanVectorization(PrintStream out, boolean jsonOutput)
-      throws Exception {
-
-    if (out != null) {
-      out.println("PLAN VECTORIZATION:");
-    }
-
-    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;
-
-    HiveConf hiveConf = queryState.getConf();
-
-    boolean isVectorizationEnabled = HiveConf.getBoolVar(hiveConf,
-        HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
-    String isVectorizationEnabledCondName =
-        (isVectorizationEnabled ?
-            trueCondNameVectorizationEnabled :
-              falseCondNameVectorizationEnabled);
-    List<String> isVectorizationEnabledCondList = Arrays.asList(isVectorizationEnabledCondName);
-
-    if (out != null) {
-      out.print(indentString(2));
-      out.print("enabled: ");
-      out.println(isVectorizationEnabled);
-      out.print(indentString(2));
-      if (!isVectorizationEnabled) {
-        out.print("enabledConditionsNotMet: ");
-      } else {
-        out.print("enabledConditionsMet: ");
-      }
-      out.println(isVectorizationEnabledCondList);
-    }
-    if (jsonOutput) {
-      json.put("enabled", isVectorizationEnabled);
-      if (!isVectorizationEnabled) {
-        json.put("enabledConditionsNotMet", isVectorizationEnabledCondList);
-      } else {
-        json.put("enabledConditionsMet", isVectorizationEnabledCondList);
-      }
-    }
-
-    return new ImmutablePair<Boolean, JSONObject>(isVectorizationEnabled, jsonOutput ? json : null);
-  }
-
   public JSONObject getJSONPlan(PrintStream out, ExplainWork work)
       throws Exception {
     return getJSONPlan(out, work.getRootTasks(), work.getFetchTask(),
@@ -272,46 +184,26 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
       ordered.add(fetchTask);
     }
 
-    boolean suppressOthersForVectorization = false;
-    if (this.work != null && this.work.isVectorization()) {
-      ImmutablePair<Boolean, JSONObject> planVecPair = outputPlanVectorization(out, jsonOutput);
-  
-      if (this.work.isVectorizationOnly()) {
-        // Suppress the STAGES if vectorization is off.
-        suppressOthersForVectorization = !planVecPair.left;
-      }
+    JSONObject jsonDependencies = outputDependencies(out, jsonOutput, appendTaskType, ordered);
 
-      if (out != null) {
-        out.println();
-      }
-  
-      if (jsonOutput) {
-        outJSONObject.put("PLAN VECTORIZATION", planVecPair.right);
-      }
+    if (out != null) {
+      out.println();
     }
 
-    if (!suppressOthersForVectorization) {
-      JSONObject jsonDependencies = outputDependencies(out, jsonOutput, appendTaskType, ordered);
-
-      if (out != null) {
-        out.println();
-      }
-
-      if (jsonOutput) {
-        outJSONObject.put("STAGE DEPENDENCIES", jsonDependencies);
-      }
+    if (jsonOutput) {
+      outJSONObject.put("STAGE DEPENDENCIES", jsonDependencies);
+    }
 
-      // Go over all the tasks and dump out the plans
-      JSONObject jsonPlan = outputStagePlans(out, ordered,
-           jsonOutput, isExtended);
+    // Go over all the tasks and dump out the plans
+    JSONObject jsonPlan = outputStagePlans(out, ordered,
+         jsonOutput, isExtended);
 
-      if (jsonOutput) {
-        outJSONObject.put("STAGE PLANS", jsonPlan);
-      }
+    if (jsonOutput) {
+      outJSONObject.put("STAGE PLANS", jsonPlan);
+    }
 
-      if (fetchTask != null) {
-        fetchTask.setParentTasks(null);
-      }
+    if (fetchTask != null) {
+      fetchTask.setParentTasks(null);
     }
 
     return jsonOutput ? outJSONObject : null;
@@ -710,64 +602,6 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
         }
       }
       if (invokeFlag) {
-        Vectorization vectorization = xpl_note.vectorization();
-        if (this.work != null && this.work.isVectorization()) {
-
-          // The EXPLAIN VECTORIZATION option was specified.
-          final boolean desireOnly = this.work.isVectorizationOnly();
-          final VectorizationDetailLevel desiredVecDetailLevel =
-              this.work.isVectorizationDetailLevel();
-
-          switch (vectorization) {
-          case NON_VECTORIZED:
-            // Display all non-vectorized leaf objects unless ONLY.
-            if (desireOnly) {
-              invokeFlag = false;
-            }
-            break;
-          case SUMMARY:
-          case OPERATOR:
-          case EXPRESSION:
-          case DETAIL:
-            if (vectorization.rank < desiredVecDetailLevel.rank) {
-              // This detail not desired.
-              invokeFlag = false;
-            }
-            break;
-          case SUMMARY_PATH:
-          case OPERATOR_PATH:
-            if (desireOnly) {
-              if (vectorization.rank < desiredVecDetailLevel.rank) {
-                // Suppress headers and all objects below.
-                invokeFlag = false;
-              }
-            }
-            break;
-          default:
-            throw new RuntimeException("Unknown EXPLAIN vectorization " + vectorization);
-          }
-        } else  {
-          // Do not display vectorization objects.
-          switch (vectorization) {
-          case SUMMARY:
-          case OPERATOR:
-          case EXPRESSION:
-          case DETAIL:
-            invokeFlag = false;
-            break;
-          case NON_VECTORIZED:
-            // No action.
-            break;
-          case SUMMARY_PATH:
-          case OPERATOR_PATH:
-            // Always include headers since they contain non-vectorized objects, too.
-            break;
-          default:
-            throw new RuntimeException("Unknown EXPLAIN vectorization " + vectorization);
-          }
-        }
-      }
-      if (invokeFlag) {
         keyJSONObject = xpl_note.displayName();
         if (out != null) {
           out.print(indentString(indent));
@@ -841,64 +675,6 @@ public class ExplainTask extends Task<ExplainWork> implements Serializable {
           }
         }
         if (invokeFlag) {
-          Vectorization vectorization = xpl_note.vectorization();
-          if (this.work != null && this.work.isVectorization()) {
-
-            // The EXPLAIN VECTORIZATION option was specified.
-            final boolean desireOnly = this.work.isVectorizationOnly();
-            final VectorizationDetailLevel desiredVecDetailLevel =
-                this.work.isVectorizationDetailLevel();
-
-            switch (vectorization) {
-            case NON_VECTORIZED:
-              // Display all non-vectorized leaf objects unless ONLY.
-              if (desireOnly) {
-                invokeFlag = false;
-              }
-              break;
-            case SUMMARY:
-            case OPERATOR:
-            case EXPRESSION:
-            case DETAIL:
-              if (vectorization.rank < desiredVecDetailLevel.rank) {
-                // This detail not desired.
-                invokeFlag = false;
-              }
-              break;
-            case SUMMARY_PATH:
-            case OPERATOR_PATH:
-              if (desireOnly) {
-                if (vectorization.rank < desiredVecDetailLevel.rank) {
-                  // Suppress headers and all objects below.
-                  invokeFlag = false;
-                }
-              }
-              break;
-            default:
-              throw new RuntimeException("Unknown EXPLAIN vectorization " + vectorization);
-            }
-          } else  {
-            // Do not display vectorization objects.
-            switch (vectorization) {
-            case SUMMARY:
-            case OPERATOR:
-            case EXPRESSION:
-            case DETAIL:
-              invokeFlag = false;
-              break;
-            case NON_VECTORIZED:
-              // No action.
-              break;
-            case SUMMARY_PATH:
-            case OPERATOR_PATH:
-              // Always include headers since they contain non-vectorized objects, too.
-              break;
-            default:
-              throw new RuntimeException("Unknown EXPLAIN vectorization " + vectorization);
-            }
-          }
-        }
-        if (invokeFlag) {
 
           Object val = null;
           try {
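
Both switch blocks deleted above implemented the same gate: an @Explain annotation tagged with a vectorization level is displayed only when its rank reaches the detail level requested by EXPLAIN VECTORIZATION. A stripped-down sketch of that comparison (the enum, its rank values, and the method name are illustrative stand-ins, not Hive's Vectorization/VectorizationDetailLevel types):

enum DetailRank {
  SUMMARY(4), OPERATOR(3), EXPRESSION(2), DETAIL(1);  // ranks assumed for illustration

  final int rank;

  DetailRank(int rank) { this.rank = rank; }

  // Mirrors "if (vectorization.rank < desiredVecDetailLevel.rank) invokeFlag = false":
  // suppress the annotation when its rank falls below the requested rank.
  static boolean shouldDisplay(DetailRank annotation, DetailRank desired) {
    return annotation.rank >= desired.rank;
  }
}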

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index c070c4a..416606e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -93,7 +93,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
   protected transient ReusableGetAdaptor[] hashMapRowGetters;
 
   private UnwrapRowContainer[] unwrapContainer;
-  protected transient Configuration hconf;
+  private transient Configuration hconf;
   private transient boolean hybridMapJoinLeftover;  // whether there's spilled data to be processed
   protected transient MapJoinBytesTableContainer[] spilledMapJoinTables;  // used to hold restored
                                                                           // spilled small tables

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index af1fa66..038b96c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -41,8 +41,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc;
 import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator;
-import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc;
-import org.apache.hadoop.hive.ql.plan.AbstractVectorDesc;
 import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc;
 import org.apache.hadoop.hive.ql.plan.CollectDesc;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -75,7 +73,6 @@ import org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
-import org.apache.hadoop.hive.ql.plan.VectorDesc;
 
 import com.google.common.base.Preconditions;
 
@@ -145,8 +142,6 @@ public final class OperatorFactory {
     Class<? extends Operator<?>> opClass, CompilationOpContext cContext, T conf,
         VectorizationContext vContext) throws HiveException {
     try {
-      VectorDesc vectorDesc = ((AbstractOperatorDesc) conf).getVectorDesc();
-      vectorDesc.setVectorOp(opClass);
       Operator<T> op = (Operator<T>) opClass.getDeclaredConstructor(
           CompilationOpContext.class, VectorizationContext.class, OperatorDesc.class)
           .newInstance(cContext, vContext, conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
index 42c7d36..9049ddd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
@@ -201,4 +201,5 @@ public class SelectOperator extends Operator<SelectDesc> implements Serializable
 
     return true;
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
index cbe83be..9f27f56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
@@ -93,7 +93,9 @@ public abstract class MapJoinKey {
     return true;
   }
 
-  public static boolean isSupportedField(TypeInfo typeInfo) {
+  public static boolean isSupportedField(String typeName) {
+    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
+
     if (typeInfo.getCategory() != Category.PRIMITIVE) return false; // not supported
     PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
     PrimitiveCategory pc = primitiveTypeInfo.getPrimitiveCategory();
@@ -101,11 +103,6 @@ public abstract class MapJoinKey {
     return true;
   }
 
-  public static boolean isSupportedField(String typeName) {
-    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
-    return isSupportedField(typeInfo);
-  }
-
 
   public static MapJoinKey readFromVector(Output output, MapJoinKey key, Object[] keyObject,
       List<ObjectInspector> keyOIs, boolean mayReuseKey) throws HiveException {
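
The hunk above folds the former String overload into isSupportedField itself, so the type name is parsed where it is used. A brief usage sketch of that parse step (the class wrapper and the "int" literal are assumptions for illustration; the serde2 typeinfo classes are those imported by the file):

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeNameParseSketch {
  public static void main(String[] args) {
    // Same parse the merged method performs before its category check.
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("int");
    System.out.println(typeInfo.getCategory() == Category.PRIMITIVE);  // prints true
  }
}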

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
index 3cf6561..1634f42 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
@@ -75,7 +75,7 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
     this.desc = joinOp.getConf();
     if (desc.getVectorMode() && HiveConf.getBoolVar(
         hconf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) {
-      VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
+      VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
       useFastContainer = vectorDesc != null && vectorDesc.hashTableImplementationType() ==
           VectorMapJoinDesc.HashTableImplementationType.FAST;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnMapping.java
index c890674..c4b95c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnMapping.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.ql.exec.vector;
 
 import java.util.Arrays;
 
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-
 /**
  * This class collects column information for copying a row from one VectorizedRowBatch to
  * same/another batch.
@@ -32,7 +30,7 @@ public abstract class VectorColumnMapping {
 
   protected int[] sourceColumns;
   protected int[] outputColumns;
-  protected TypeInfo[] typeInfos;
+  protected String[] typeNames;
 
   protected VectorColumnOrderedMap vectorColumnMapping;
 
@@ -40,7 +38,7 @@ public abstract class VectorColumnMapping {
     this.vectorColumnMapping = new VectorColumnOrderedMap(name);
   }
 
-  public abstract void add(int sourceColumn, int outputColumn, TypeInfo typeInfo);
+  public abstract void add(int sourceColumn, int outputColumn, String typeName);
 
   public abstract void finalize();
 
@@ -56,8 +54,8 @@ public abstract class VectorColumnMapping {
     return outputColumns;
   }
 
-  public TypeInfo[] getTypeInfos() {
-    return typeInfos;
+  public String[] getTypeNames() {
+    return typeNames;
   }
 
   @Override
@@ -67,7 +65,7 @@ public abstract class VectorColumnMapping {
     sb.append(", ");
     sb.append("output columns: " + Arrays.toString(outputColumns));
     sb.append(", ");
-    sb.append("type infos: " + Arrays.toString(typeInfos));
+    sb.append("type names: " + Arrays.toString(typeNames));
     return sb.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java
index 97d55f5..0e6014b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOrderedMap.java
@@ -23,10 +23,8 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.commons.lang.ArrayUtils;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 /**
  * This class collects column information for mapping vector columns, including the hive type name.
@@ -45,17 +43,17 @@ public class VectorColumnOrderedMap {
   private class Value {
     int valueColumn;
 
-    TypeInfo typeInfo;
+    String typeName;
 
-    Value(int valueColumn, TypeInfo typeInfo) {
+    Value(int valueColumn, String typeName) {
       this.valueColumn = valueColumn;
-      this.typeInfo = typeInfo;
+      this.typeName = typeName;
     }
 
     public String toString() {
       StringBuilder sb = new StringBuilder();
       sb.append("(value column: " + valueColumn);
-      sb.append(", type info: " + typeInfo.toString() + ")");
+      sb.append(", type name: " + typeName + ")");
       return sb.toString();
     }
   }
@@ -64,12 +62,12 @@ public class VectorColumnOrderedMap {
 
     private final int[] orderedColumns;
     private final int[] valueColumns;
-    private final TypeInfo[] typeInfos;
+    private final String[] typeNames;
 
-    Mapping(int[] orderedColumns, int[] valueColumns, TypeInfo[] typeInfos) {
+    Mapping(int[] orderedColumns, int[] valueColumns, String[] typeNames) {
       this.orderedColumns = orderedColumns;
       this.valueColumns = valueColumns;
-      this.typeInfos = typeInfos;
+      this.typeNames = typeNames;
     }
 
     public int getCount() {
@@ -84,8 +82,8 @@ public class VectorColumnOrderedMap {
       return valueColumns;
     }
 
-    public TypeInfo[] getTypeInfos() {
-      return typeInfos;
+    public String[] getTypeNames() {
+      return typeNames;
     }
   }
 
@@ -94,14 +92,14 @@ public class VectorColumnOrderedMap {
     orderedTreeMap = new TreeMap<Integer, Value>();
   }
 
-  public void add(int orderedColumn, int valueColumn, TypeInfo typeInfo) {
+  public void add(int orderedColumn, int valueColumn, String typeName) {
     if (orderedTreeMap.containsKey(orderedColumn)) {
       throw new RuntimeException(
           name + " duplicate column " + orderedColumn +
           " in ordered column map " + orderedTreeMap.toString() +
-          " when adding value column " + valueColumn + ", type into " + typeInfo.toString());
+          " when adding value column " + valueColumn + ", type " + typeName);
     }
-    orderedTreeMap.put(orderedColumn, new Value(valueColumn, typeInfo));
+    orderedTreeMap.put(orderedColumn, new Value(valueColumn, typeName));
   }
 
   public boolean orderedColumnsContain(int orderedColumn) {
@@ -111,16 +109,17 @@ public class VectorColumnOrderedMap {
   public Mapping getMapping() {
     ArrayList<Integer> orderedColumns = new ArrayList<Integer>();
     ArrayList<Integer> valueColumns = new ArrayList<Integer>();
-    ArrayList<TypeInfo> typeInfos = new ArrayList<TypeInfo>();
+    ArrayList<String> typeNames = new ArrayList<String>();
     for (Map.Entry<Integer, Value> entry : orderedTreeMap.entrySet()) {
       orderedColumns.add(entry.getKey());
       Value value = entry.getValue();
       valueColumns.add(value.valueColumn);
-      typeInfos.add(value.typeInfo);
+      typeNames.add(value.typeName);
     }
     return new Mapping(
             ArrayUtils.toPrimitive(orderedColumns.toArray(new Integer[0])),
             ArrayUtils.toPrimitive(valueColumns.toArray(new Integer[0])),
-            typeInfos.toArray(new TypeInfo[0]));
+            typeNames.toArray(new String[0]));
+    
   }
 }
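
The ordering contract above comes entirely from the TreeMap keyed by
ordered column: getMapping() walks entries in ascending key order, and
add() rejects duplicate keys up front. A self-contained sketch of just
that behavior, using a bare TreeMap in place of the private Value wrapper:

  import java.util.TreeMap;

  public class OrderedMapSketch {
    public static void main(String[] args) {
      // Keyed by ordered column; inserted out of order on purpose.
      TreeMap<Integer, String> orderedTreeMap = new TreeMap<Integer, String>();
      orderedTreeMap.put(5, "bigint");
      orderedTreeMap.put(1, "string");
      orderedTreeMap.put(3, "double");

      // Iteration is by ascending key, so getMapping() emits sorted arrays.
      System.out.println(orderedTreeMap.keySet());  // [1, 3, 5]
      System.out.println(orderedTreeMap.values());  // [string, double, bigint]

      // Mirrors the duplicate check in add().
      if (orderedTreeMap.containsKey(3)) {
        System.out.println("adding ordered column 3 again would throw");
      }
    }
  }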

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOutputMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOutputMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOutputMapping.java
index 4ceff6b..f35aff7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOutputMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnOutputMapping.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOrderedMap.Mapping;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 /**
  * This class collects column information for copying a row from one VectorizedRowBatch to
@@ -36,9 +35,9 @@ public class VectorColumnOutputMapping extends VectorColumnMapping {
   }
 
   @Override
-  public void add(int sourceColumn, int outputColumn, TypeInfo typeInfo) {
+  public void add(int sourceColumn, int outputColumn, String typeName) {
     // Order on outputColumn.
-    vectorColumnMapping.add(outputColumn, sourceColumn, typeInfo);
+    vectorColumnMapping.add(outputColumn, sourceColumn, typeName);
   }
 
   public boolean containsOutputColumn(int outputColumn) {
@@ -52,7 +51,7 @@ public class VectorColumnOutputMapping extends VectorColumnMapping {
     // Ordered columns are the output columns.
     sourceColumns = mapping.getValueColumns();
     outputColumns = mapping.getOrderedColumns();
-    typeInfos = mapping.getTypeInfos();
+    typeNames = mapping.getTypeNames();
 
     // Not needed anymore.
     vectorColumnMapping = null;
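
Because add(sourceColumn, outputColumn, typeName) delegates with its
arguments flipped, the finalized arrays come back ordered by output
column. A usage sketch, assuming the single-argument constructor and the
getSourceColumns()/getOutputColumns()/getTypeNames() getters implied by
this class and its parent:

  import java.util.Arrays;

  import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping;

  public class OutputMappingDemo {
    public static void main(String[] args) {
      VectorColumnOutputMapping mapping = new VectorColumnOutputMapping("demo");
      // The ordering key is outputColumn, not sourceColumn.
      mapping.add(7, 2, "int");
      mapping.add(4, 0, "string");
      mapping.finalize();

      System.out.println(Arrays.toString(mapping.getOutputColumns())); // [0, 2]
      System.out.println(Arrays.toString(mapping.getSourceColumns())); // [4, 7]
      System.out.println(Arrays.toString(mapping.getTypeNames()));     // [string, int]
    }
  }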


[58/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQuery.java
deleted file mode 100644
index 43982aa..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQuery.java
+++ /dev/null
@@ -1,1053 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-import java.io.IOException;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptCost;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.rel.RelFieldCollation;
-import org.apache.calcite.rel.RelFieldCollation.Direction;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.RelWriter;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.metadata.RelMetadataQuery;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Litmus;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-import org.apache.hadoop.hive.conf.Constants;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateGranularity;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.joda.time.Interval;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonGenerator;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-/**
- * Relational expression representing a scan of a Druid data set.
- *
- * TODO: to be removed when Calcite is upgraded to 1.9
- */
-public class DruidQuery extends TableScan {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(DruidQuery.class);
-
-  protected QuerySpec querySpec;
-
-  final DruidTable druidTable;
-  final List<Interval> intervals;
-  final ImmutableList<RelNode> rels;
-
-  private static final Pattern VALID_SIG = Pattern.compile("sf?p?a?l?");
-
-  /**
-   * Creates a DruidQuery.
-   *
-   * @param cluster        Cluster
-   * @param traitSet       Traits
-   * @param table          Table
-   * @param druidTable     Druid table
-   * @param interval       Interval for the query
-   * @param rels           Internal relational expressions
-   */
-  private DruidQuery(RelOptCluster cluster, RelTraitSet traitSet,
-      RelOptTable table, DruidTable druidTable,
-      List<Interval> intervals, List<RelNode> rels) {
-    super(cluster, traitSet, table);
-    this.druidTable = druidTable;
-    this.intervals = ImmutableList.copyOf(intervals);
-    this.rels = ImmutableList.copyOf(rels);
-
-    assert isValid(Litmus.THROW);
-  }
-
-  /** Returns a string describing the operations inside this query.
-   *
-   * <p>For example, "sfpal" means {@link TableScan} (s)
-   * followed by {@link Filter} (f)
-   * followed by {@link Project} (p)
-   * followed by {@link Aggregate} (a)
-   * followed by {@link Sort} (l).
-   *
-   * @see #isValidSignature(String)
-   */
-  String signature() {
-    final StringBuilder b = new StringBuilder();
-    for (RelNode rel : rels) {
-      b.append(rel instanceof TableScan ? 's'
-          : rel instanceof Project ? 'p'
-          : rel instanceof Filter ? 'f'
-          : rel instanceof Aggregate ? 'a'
-          : rel instanceof Sort ? 'l'
-          : '!');
-    }
-    return b.toString();
-  }
-
-  @Override public boolean isValid(Litmus litmus) {
-    if (!super.isValid(litmus)) {
-      return false;
-    }
-    final String signature = signature();
-    if (!isValidSignature(signature)) {
-      return litmus.fail("invalid signature");
-    }
-    if (rels.isEmpty()) {
-      return litmus.fail("must have at least one rel");
-    }
-    for (int i = 0; i < rels.size(); i++) {
-      final RelNode r = rels.get(i);
-      if (i == 0) {
-        if (!(r instanceof TableScan)) {
-          return litmus.fail("first rel must be TableScan");
-        }
-        if (r.getTable() != table) {
-          return litmus.fail("first rel must be based on table table");
-        }
-      } else {
-        final List<RelNode> inputs = r.getInputs();
-        if (inputs.size() != 1 || inputs.get(0) != rels.get(i - 1)) {
-          return litmus.fail("each rel must have a single input");
-        }
-        if (r instanceof Aggregate) {
-          final Aggregate aggregate = (Aggregate) r;
-          if (aggregate.getGroupSets().size() != 1
-              || aggregate.indicator) {
-            return litmus.fail("no grouping sets");
-          }
-          for (AggregateCall call : aggregate.getAggCallList()) {
-            if (call.filterArg >= 0) {
-              return litmus.fail("no filtered aggregate functions");
-            }
-          }
-        }
-        if (r instanceof Filter) {
-          final Filter filter = (Filter) r;
-          if (!isValidFilter(filter.getCondition())) {
-            return litmus.fail("invalid filter");
-          }
-        }
-        if (r instanceof Sort) {
-          final Sort sort = (Sort) r;
-          if (sort.offset != null && RexLiteral.intValue(sort.offset) != 0) {
-            return litmus.fail("offset not supported");
-          }
-        }
-      }
-    }
-    return true;
-  }
-
-  boolean isValidFilter(RexNode e) {
-    switch (e.getKind()) {
-    case INPUT_REF:
-    case LITERAL:
-      return true;
-    case AND:
-    case OR:
-    case NOT:
-    case EQUALS:
-    case LESS_THAN:
-    case LESS_THAN_OR_EQUAL:
-    case GREATER_THAN:
-    case GREATER_THAN_OR_EQUAL:
-    case BETWEEN:
-    case IN:
-    case CAST:
-      return areValidFilters(((RexCall) e).getOperands());
-    default:
-      return false;
-    }
-  }
-
-  private boolean areValidFilters(List<RexNode> es) {
-    for (RexNode e : es) {
-      if (!isValidFilter(e)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /** Returns whether a signature represents a sequence of relational operators
-   * that can be translated into a valid Druid query. */
-  static boolean isValidSignature(String signature) {
-    return VALID_SIG.matcher(signature).matches();
-  }
-
-  /** Creates a DruidQuery. */
-  public static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
-      RelOptTable table, DruidTable druidTable, List<RelNode> rels) {
-    return new DruidQuery(cluster, traitSet, table, druidTable, druidTable.intervals, rels);
-  }
-
-  /** Creates a DruidQuery. */
-  private static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
-      RelOptTable table, DruidTable druidTable, List<Interval> intervals, List<RelNode> rels) {
-    return new DruidQuery(cluster, traitSet, table, druidTable, intervals, rels);
-  }
-
-  /** Extends a DruidQuery. */
-  public static DruidQuery extendQuery(DruidQuery query, RelNode r) {
-    final ImmutableList.Builder<RelNode> builder = ImmutableList.builder();
-    return DruidQuery.create(query.getCluster(), query.getTraitSet(), query.getTable(),
-            query.druidTable, query.intervals, builder.addAll(query.rels).add(r).build());
-  }
-
-  /** Extends a DruidQuery. */
-  public static DruidQuery extendQuery(DruidQuery query, List<Interval> intervals) {
-    return DruidQuery.create(query.getCluster(), query.getTraitSet(), query.getTable(),
-            query.druidTable, intervals, query.rels);
-  }
-
-  @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
-    assert inputs.isEmpty();
-    return this;
-  }
-
-  @Override public RelDataType deriveRowType() {
-    return getCluster().getTypeFactory().createStructType(
-            Pair.right(Util.last(rels).getRowType().getFieldList()),
-            getQuerySpec().fieldNames);
-  }
-
-  public TableScan getTableScan() {
-    return (TableScan) rels.get(0);
-  }
-
-  public RelNode getTopNode() {
-    return Util.last(rels);
-  }
-
-  @Override public RelOptTable getTable() {
-    return table;
-  }
-
-  @Override public RelWriter explainTerms(RelWriter pw) {
-    for (RelNode rel : rels) {
-      if (rel instanceof TableScan) {
-        TableScan tableScan = (TableScan) rel;
-        pw.item("table", tableScan.getTable().getQualifiedName());
-        pw.item("intervals", intervals);
-      } else if (rel instanceof Filter) {
-        pw.item("filter", ((Filter) rel).getCondition());
-      } else if (rel instanceof Project) {
-        pw.item("projects", ((Project) rel).getProjects());
-      } else if (rel instanceof Aggregate) {
-        final Aggregate aggregate = (Aggregate) rel;
-        pw.item("groups", aggregate.getGroupSet())
-            .item("aggs", aggregate.getAggCallList());
-      } else if (rel instanceof Sort) {
-        final Sort sort = (Sort) rel;
-        for (Ord<RelFieldCollation> ord
-                : Ord.zip(sort.collation.getFieldCollations())) {
-          pw.item("sort" + ord.i, ord.e.getFieldIndex());
-        }
-        for (Ord<RelFieldCollation> ord
-            : Ord.zip(sort.collation.getFieldCollations())) {
-          pw.item("dir" + ord.i, ord.e.shortString());
-        }
-        pw.itemIf("fetch", sort.fetch, sort.fetch != null);
-      } else {
-        throw new AssertionError("rel type not supported in Druid query "
-            + rel);
-      }
-    }
-    return pw;
-  }
-
-  @Override public RelOptCost computeSelfCost(RelOptPlanner planner,
-      RelMetadataQuery mq) {
-    // Heuristic: we assume pushing query to Druid reduces cost by 90%
-    return Util.last(rels).computeSelfCost(planner, mq).multiplyBy(.1);
-  }
-
-  @Override public RelNode project(ImmutableBitSet fieldsUsed,
-      Set<RelDataTypeField> extraFields,
-      RelBuilder relBuilder) {
-    final int fieldCount = getRowType().getFieldCount();
-    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount))
-        && extraFields.isEmpty()) {
-      return this;
-    }
-    final List<RexNode> exprList = new ArrayList<>();
-    final List<String> nameList = new ArrayList<>();
-    final RexBuilder rexBuilder = getCluster().getRexBuilder();
-    final List<RelDataTypeField> fields = getRowType().getFieldList();
-
-    // Project the subset of fields.
-    for (int i : fieldsUsed) {
-      RelDataTypeField field = fields.get(i);
-      exprList.add(rexBuilder.makeInputRef(this, i));
-      nameList.add(field.getName());
-    }
-
-    // Project nulls for the extra fields. (Maybe a sub-class table has
-    // extra fields, but we don't.)
-    for (RelDataTypeField extraField : extraFields) {
-      exprList.add(
-          rexBuilder.ensureType(
-              extraField.getType(),
-              rexBuilder.constantNull(),
-              true));
-      nameList.add(extraField.getName());
-    }
-
-    HiveProject hp = (HiveProject) relBuilder.push(this).project(exprList, nameList).build();
-    hp.setSynthetic();
-    return hp;
-  }
-
-  public QuerySpec getQuerySpec() {
-    if (querySpec == null) {
-      querySpec = deriveQuerySpec();
-      assert querySpec != null : this;
-    }
-    return querySpec;
-  }
-
-  protected QuerySpec deriveQuerySpec() {
-    final RelDataType rowType = table.getRowType();
-    int i = 1;
-
-    RexNode filter = null;
-    if (i < rels.size() && rels.get(i) instanceof Filter) {
-      final Filter filterRel = (Filter) rels.get(i++);
-      filter = filterRel.getCondition();
-    }
-
-    List<RexNode> projects = null;
-    if (i < rels.size() && rels.get(i) instanceof Project) {
-      final Project project = (Project) rels.get(i++);
-      projects = project.getProjects();
-    }
-
-    ImmutableBitSet groupSet = null;
-    List<AggregateCall> aggCalls = null;
-    List<String> aggNames = null;
-    if (i < rels.size() && rels.get(i) instanceof Aggregate) {
-      final Aggregate aggregate = (Aggregate) rels.get(i++);
-      groupSet = aggregate.getGroupSet();
-      aggCalls = aggregate.getAggCallList();
-      aggNames = Util.skip(aggregate.getRowType().getFieldNames(),
-          groupSet.cardinality());
-    }
-
-    List<Integer> collationIndexes = null;
-    List<Direction> collationDirections = null;
-    Integer fetch = null;
-    if (i < rels.size() && rels.get(i) instanceof Sort) {
-      final Sort sort = (Sort) rels.get(i++);
-      collationIndexes = new ArrayList<>();
-      collationDirections = new ArrayList<>();
-      for (RelFieldCollation fCol: sort.collation.getFieldCollations()) {
-        collationIndexes.add(fCol.getFieldIndex());
-        collationDirections.add(fCol.getDirection());
-      }
-      fetch = sort.fetch != null ? RexLiteral.intValue(sort.fetch) : null;
-    }
-
-    if (i != rels.size()) {
-      throw new AssertionError("could not implement all rels");
-    }
-
-    return getQuery(rowType, filter, projects, groupSet, aggCalls, aggNames,
-            collationIndexes, collationDirections, fetch);
-  }
-
-  public String getQueryType() {
-    return getQuerySpec().queryType.getQueryName();
-  }
-
-  public String getQueryString() {
-    return getQuerySpec().queryString;
-  }
-
-  private QuerySpec getQuery(RelDataType rowType, RexNode filter, List<RexNode> projects,
-      ImmutableBitSet groupSet, List<AggregateCall> aggCalls, List<String> aggNames,
-      List<Integer> collationIndexes, List<Direction> collationDirections, Integer fetch) {
-    DruidQueryType queryType = DruidQueryType.SELECT;
-    final Translator translator = new Translator(druidTable, rowType);
-    List<String> fieldNames = rowType.getFieldNames();
-
-    // Handle filter
-    Json jsonFilter = null;
-    if (filter != null) {
-      jsonFilter = translator.translateFilter(filter);
-    }
-
-    // Then we handle project
-    if (projects != null) {
-      translator.metrics.clear();
-      translator.dimensions.clear();
-      final ImmutableList.Builder<String> builder = ImmutableList.builder();
-      for (RexNode project : projects) {
-        builder.add(translator.translate(project, true));
-      }
-      fieldNames = builder.build();
-    }
-
-    // Finally we handle aggregate and sort. Handling of these
-    // operators is more complex, since we need to extract
-    // the conditions to know whether the query will be
-    // executed as a Timeseries, TopN, or GroupBy in Druid
-    final List<String> dimensions = new ArrayList<>();
-    final List<JsonAggregation> aggregations = new ArrayList<>();
-    String granularity = "ALL";
-    Direction timeSeriesDirection = null;
-    JsonLimit limit = null;
-    if (groupSet != null) {
-      assert aggCalls != null;
-      assert aggNames != null;
-      assert aggCalls.size() == aggNames.size();
-
-      int timePositionIdx = -1;
-      final ImmutableList.Builder<String> builder = ImmutableList.builder();
-      if (projects != null) {
-        for (int groupKey : groupSet) {
-          final String s = fieldNames.get(groupKey);
-          final RexNode project = projects.get(groupKey);
-          if (project instanceof RexInputRef) {
-            // Reference, it could be to the timestamp column or any other dimension
-            final RexInputRef ref = (RexInputRef) project;
-            final String origin = druidTable.rowType.getFieldList().get(ref.getIndex()).getName();
-            if (origin.equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
-              granularity = "NONE";
-              builder.add(s);
-              assert timePositionIdx == -1;
-              timePositionIdx = groupKey;
-            } else {
-              dimensions.add(s);
-              builder.add(s);
-            }
-          } else if (project instanceof RexCall) {
-            // Call, check if we should infer granularity
-            RexCall call = (RexCall) project;
-            if (HiveDateGranularity.ALL_FUNCTIONS.contains(call.getOperator())) {
-              granularity = call.getOperator().getName();
-              builder.add(s);
-              assert timePositionIdx == -1;
-              timePositionIdx = groupKey;
-            } else {
-              dimensions.add(s);
-              builder.add(s);
-            }
-          } else {
-            throw new AssertionError("incompatible project expression: " + project);
-          }
-        }
-      } else {
-        for (int groupKey : groupSet) {
-          final String s = fieldNames.get(groupKey);
-          if (s.equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
-            granularity = "NONE";
-            builder.add(s);
-            assert timePositionIdx == -1;
-            timePositionIdx = groupKey;
-          } else {
-            dimensions.add(s);
-            builder.add(s);
-          }
-        }
-      }
-
-      for (Pair<AggregateCall, String> agg : Pair.zip(aggCalls, aggNames)) {
-        final JsonAggregation jsonAggregation =
-            getJsonAggregation(fieldNames, agg.right, agg.left);
-        aggregations.add(jsonAggregation);
-        builder.add(jsonAggregation.name);
-      }
-
-      fieldNames = builder.build();
-
-      ImmutableList<JsonCollation> collations = null;
-      boolean sortsMetric = false;
-      if (collationIndexes != null) {
-        assert collationDirections != null;
-        ImmutableList.Builder<JsonCollation> colBuilder = new ImmutableList.Builder<JsonCollation>();
-        for (Pair<Integer,Direction> p : Pair.zip(collationIndexes, collationDirections)) {
-          colBuilder.add(new JsonCollation(fieldNames.get(p.left),
-                  p.right == Direction.DESCENDING ? "descending" : "ascending"));
-          if (p.left >= groupSet.cardinality() && p.right == Direction.DESCENDING) {
-            // Currently only support for DESC in TopN
-            sortsMetric = true;
-          } else if (p.left == timePositionIdx) {
-            assert timeSeriesDirection == null;
-            timeSeriesDirection = p.right;
-          }
-        }
-        collations = colBuilder.build();
-      }
-
-      limit = new JsonLimit("default", fetch, collations);
-
-      if (dimensions.isEmpty() && (collations == null || timeSeriesDirection != null)) {
-        queryType = DruidQueryType.TIMESERIES;
-        assert fetch == null;
-      } else if (dimensions.size() == 1 && sortsMetric && collations.size() == 1 && fetch != null) {
-        queryType = DruidQueryType.TOP_N;
-      } else {
-        queryType = DruidQueryType.GROUP_BY;
-      }
-    } else {
-      assert aggCalls == null;
-      assert aggNames == null;
-      assert collationIndexes == null || collationIndexes.isEmpty();
-      assert collationDirections == null || collationDirections.isEmpty();
-    }
-
-    final StringWriter sw = new StringWriter();
-    final JsonFactory factory = new JsonFactory();
-    try {
-      final JsonGenerator generator = factory.createGenerator(sw);
-
-      switch (queryType) {
-      case TIMESERIES:
-        generator.writeStartObject();
-
-        generator.writeStringField("queryType", "timeseries");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeStringField("descending", timeSeriesDirection != null &&
-            timeSeriesDirection == Direction.DESCENDING ? "true" : "false");
-        generator.writeStringField("granularity", granularity);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", null);
-        writeField(generator, "intervals", intervals);
-
-        generator.writeEndObject();
-        break;
-
-      case TOP_N:
-        generator.writeStartObject();
-
-        generator.writeStringField("queryType", "topN");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeStringField("granularity", granularity);
-        generator.writeStringField("dimension", dimensions.get(0));
-        generator.writeStringField("metric", fieldNames.get(collationIndexes.get(0)));
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", null);
-        writeField(generator, "intervals", intervals);
-        generator.writeNumberField("threshold", fetch);
-
-        generator.writeEndObject();
-        break;
-
-      case GROUP_BY:
-        generator.writeStartObject();
-
-        if (aggregations.isEmpty()) {
-          // Druid requires at least one aggregation, otherwise gives:
-          //   Must have at least one AggregatorFactory
-          aggregations.add(
-              new JsonAggregation("longSum", "dummy_agg", "dummy_agg"));
-        }
-
-        generator.writeStringField("queryType", "groupBy");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeStringField("granularity", granularity);
-        writeField(generator, "dimensions", dimensions);
-        writeFieldIf(generator, "limitSpec", limit);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", null);
-        writeField(generator, "intervals", intervals);
-        writeFieldIf(generator, "having", null);
-
-        generator.writeEndObject();
-        break;
-
-      case SELECT:
-        generator.writeStartObject();
-
-        generator.writeStringField("queryType", "select");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeStringField("descending", "false");
-        writeField(generator, "intervals", intervals);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "dimensions", translator.dimensions);
-        writeField(generator, "metrics", translator.metrics);
-        generator.writeStringField("granularity", granularity);
-
-        generator.writeFieldName("pagingSpec");
-        generator.writeStartObject();
-        generator.writeNumberField("threshold", fetch != null ? fetch : 1);
-        generator.writeEndObject();
-
-        generator.writeFieldName("context");
-        generator.writeStartObject();
-        generator.writeBooleanField(Constants.DRUID_QUERY_FETCH, fetch != null);
-        generator.writeEndObject();
-
-        generator.writeEndObject();
-        break;
-
-      default:
-        throw new AssertionError("unknown query type " + queryType);
-      }
-
-      generator.close();
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-
-    return new QuerySpec(queryType, sw.toString(), fieldNames);
-  }
-
-  private JsonAggregation getJsonAggregation(List<String> fieldNames,
-      String name, AggregateCall aggCall) {
-    final List<String> list = new ArrayList<>();
-    for (Integer arg : aggCall.getArgList()) {
-      list.add(fieldNames.get(arg));
-    }
-    final String only = Iterables.getFirst(list, null);
-    final boolean b = aggCall.getType().getSqlTypeName() == SqlTypeName.DOUBLE;
-    switch (aggCall.getAggregation().getKind()) {
-    case COUNT:
-      if (aggCall.isDistinct()) {
-        return new JsonCardinalityAggregation("cardinality", name, list);
-      }
-      return new JsonAggregation("count", name, only);
-    case SUM:
-    case SUM0:
-      return new JsonAggregation(b ? "doubleSum" : "longSum", name, only);
-    case MIN:
-      return new JsonAggregation(b ? "doubleMin" : "longMin", name, only);
-    case MAX:
-      return new JsonAggregation(b ? "doubleMax" : "longMax", name, only);
-    default:
-      throw new AssertionError("unknown aggregate " + aggCall);
-    }
-  }
-
-  private static void writeField(JsonGenerator generator, String fieldName,
-      Object o) throws IOException {
-    generator.writeFieldName(fieldName);
-    writeObject(generator, o);
-  }
-
-  private static void writeFieldIf(JsonGenerator generator, String fieldName,
-      Object o) throws IOException {
-    if (o != null) {
-      writeField(generator, fieldName, o);
-    }
-  }
-
-  private static void writeArray(JsonGenerator generator, List<?> elements)
-      throws IOException {
-    generator.writeStartArray();
-    for (Object o : elements) {
-      writeObject(generator, o);
-    }
-    generator.writeEndArray();
-  }
-
-  private static void writeObject(JsonGenerator generator, Object o)
-      throws IOException {
-    if (o instanceof String) {
-      String s = (String) o;
-      generator.writeString(s);
-    } else if (o instanceof Interval) {
-      Interval i = (Interval) o;
-      generator.writeString(i.toString());
-    } else if (o instanceof Integer) {
-      Integer i = (Integer) o;
-      generator.writeNumber(i);
-    } else if (o instanceof List) {
-      writeArray(generator, (List<?>) o);
-    } else if (o instanceof Json) {
-      ((Json) o).write(generator);
-    } else {
-      throw new AssertionError("not a json object: " + o);
-    }
-  }
-
-  /** Druid query specification. */
-  public static class QuerySpec {
-    final DruidQueryType queryType;
-    final String queryString;
-    final List<String> fieldNames;
-
-    QuerySpec(DruidQueryType queryType, String queryString,
-        List<String> fieldNames) {
-      this.queryType = Preconditions.checkNotNull(queryType);
-      this.queryString = Preconditions.checkNotNull(queryString);
-      this.fieldNames = ImmutableList.copyOf(fieldNames);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(queryType, queryString, fieldNames);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof QuerySpec
-          && queryType == ((QuerySpec) obj).queryType
-          && queryString.equals(((QuerySpec) obj).queryString)
-          && fieldNames.equals(((QuerySpec) obj).fieldNames);
-    }
-
-    @Override public String toString() {
-      return "{queryType: " + queryType
-          + ", queryString: " + queryString
-          + ", fieldNames: " + fieldNames + "}";
-    }
-
-    String getQueryString(String pagingIdentifier, int offset) {
-      if (pagingIdentifier == null) {
-        return queryString;
-      }
-      return queryString.replace("\"threshold\":",
-          "\"pagingIdentifiers\":{\"" + pagingIdentifier + "\":" + offset
-              + "},\"threshold\":");
-    }
-  }
-
-  /** Translates scalar expressions to Druid field references. */
-  private static class Translator {
-    final List<String> dimensions = new ArrayList<>();
-    final List<String> metrics = new ArrayList<>();
-    final DruidTable druidTable;
-    final RelDataType rowType;
-
-    Translator(DruidTable druidTable, RelDataType rowType) {
-      this.druidTable = druidTable;
-      this.rowType = rowType;
-      for (RelDataTypeField f : rowType.getFieldList()) {
-        final String fieldName = f.getName();
-        if (druidTable.metricFieldNames.contains(fieldName)) {
-          metrics.add(fieldName);
-        } else if (!DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(fieldName)) {
-          dimensions.add(fieldName);
-        }
-      }
-    }
-
-    String translate(RexNode e, boolean set) {
-      switch (e.getKind()) {
-      case INPUT_REF:
-        final RexInputRef ref = (RexInputRef) e;
-        final String fieldName =
-            rowType.getFieldList().get(ref.getIndex()).getName();
-        if (set) {
-          if (druidTable.metricFieldNames.contains(fieldName)) {
-            metrics.add(fieldName);
-          } else if (!DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(fieldName)) {
-            dimensions.add(fieldName);
-          }
-        }
-        return fieldName;
-
-      case CAST:
-       return tr(e, 0, set);
-
-      case LITERAL:
-        return ((RexLiteral) e).getValue2().toString();
-
-      case OTHER_FUNCTION:
-        final RexCall call = (RexCall) e;
-        assert HiveDateGranularity.ALL_FUNCTIONS.contains(call.getOperator());
-        return tr(call, 0, set);
-
-      default:
-        throw new AssertionError("invalid expression " + e);
-      }
-    }
-
-    @SuppressWarnings("incomplete-switch")
-    private JsonFilter translateFilter(RexNode e) {
-      RexCall call;
-      switch (e.getKind()) {
-      case EQUALS:
-      case NOT_EQUALS:
-      case GREATER_THAN:
-      case GREATER_THAN_OR_EQUAL:
-      case LESS_THAN:
-      case LESS_THAN_OR_EQUAL:
-        call = (RexCall) e;
-        int posRef;
-        int posConstant;
-        if (RexUtil.isConstant(call.getOperands().get(1))) {
-          posRef = 0;
-          posConstant = 1;
-        } else if (RexUtil.isConstant(call.getOperands().get(0))) {
-          posRef = 1;
-          posConstant = 0;
-        } else {
-          throw new AssertionError("it is not a valid comparison: " + e);
-        }
-        switch (e.getKind()) {
-        case EQUALS:
-          return new JsonSelector("selector", tr(e, posRef), tr(e, posConstant));
-        case NOT_EQUALS:
-          return new JsonCompositeFilter("not",
-              ImmutableList.of(new JsonSelector("selector", tr(e, posRef), tr(e, posConstant))));
-        case GREATER_THAN:
-          return new JsonBound("bound", tr(e, posRef), tr(e, posConstant), true, null, false,
-              false);
-        case GREATER_THAN_OR_EQUAL:
-          return new JsonBound("bound", tr(e, posRef), tr(e, posConstant), false, null, false,
-              false);
-        case LESS_THAN:
-          return new JsonBound("bound", tr(e, posRef), null, false, tr(e, posConstant), true,
-              false);
-        case LESS_THAN_OR_EQUAL:
-          return new JsonBound("bound", tr(e, posRef), null, false, tr(e, posConstant), false,
-              false);
-        }
-      case AND:
-      case OR:
-      case NOT:
-        call = (RexCall) e;
-        return new JsonCompositeFilter(e.getKind().toString().toLowerCase(),
-            translateFilters(call.getOperands()));
-      default:
-        throw new AssertionError("cannot translate filter: " + e);
-      }
-    }
-
-    private String tr(RexNode call, int index) {
-      return tr(call, index, false);
-    }
-
-    private String tr(RexNode call, int index, boolean set) {
-      return translate(((RexCall) call).getOperands().get(index), set);
-    }
-
-    private List<JsonFilter> translateFilters(List<RexNode> operands) {
-      final ImmutableList.Builder<JsonFilter> builder =
-          ImmutableList.builder();
-      for (RexNode operand : operands) {
-        builder.add(translateFilter(operand));
-      }
-      return builder.build();
-    }
-  }
-
-  /** Object that knows how to write itself to a
-   * {@link com.fasterxml.jackson.core.JsonGenerator}. */
-  private interface Json {
-    void write(JsonGenerator generator) throws IOException;
-  }
-
-  /** Aggregation element of a Druid "groupBy" or "topN" query. */
-  private static class JsonAggregation implements Json {
-    final String type;
-    final String name;
-    final String fieldName;
-
-    private JsonAggregation(String type, String name, String fieldName) {
-      this.type = type;
-      this.name = name;
-      this.fieldName = fieldName;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      generator.writeStringField("name", name);
-      writeFieldIf(generator, "fieldName", fieldName);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Collation element of a Druid "groupBy" query. */
-  private static class JsonLimit implements Json {
-    final String type;
-    final Integer limit;
-    final ImmutableList<JsonCollation> collations;
-
-    private JsonLimit(String type, Integer limit, ImmutableList<JsonCollation> collations) {
-      this.type = type;
-      this.limit = limit;
-      this.collations = collations;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      writeFieldIf(generator, "limit", limit);
-      writeFieldIf(generator, "columns", collations);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Collation element of a Druid "groupBy" query. */
-  private static class JsonCollation implements Json {
-    final String dimension;
-    final String direction;
-
-    private JsonCollation(String dimension, String direction) {
-      this.dimension = dimension;
-      this.direction = direction;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("dimension", dimension);
-      writeFieldIf(generator, "direction", direction);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Aggregation element that calls the "cardinality" function. */
-  private static class JsonCardinalityAggregation extends JsonAggregation {
-    final List<String> fieldNames;
-
-    private JsonCardinalityAggregation(String type, String name,
-        List<String> fieldNames) {
-      super(type, name, null);
-      this.fieldNames = fieldNames;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      generator.writeStringField("name", name);
-      writeFieldIf(generator, "fieldNames", fieldNames);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Filter element of a Druid "groupBy" or "topN" query. */
-  private abstract static class JsonFilter implements Json {
-    final String type;
-
-    private JsonFilter(String type) {
-      this.type = type;
-    }
-  }
-
-  /** Equality filter. */
-  private static class JsonSelector extends JsonFilter {
-    private final String dimension;
-    private final String value;
-
-    private JsonSelector(String type, String dimension, String value) {
-      super(type);
-      this.dimension = dimension;
-      this.value = value;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      generator.writeStringField("dimension", dimension);
-      generator.writeStringField("value", value);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Bound filter. */
-  private static class JsonBound extends JsonFilter {
-    private final String dimension;
-    private final String lower;
-    private final boolean lowerStrict;
-    private final String upper;
-    private final boolean upperStrict;
-    private final boolean alphaNumeric;
-
-    private JsonBound(String type, String dimension, String lower,
-        boolean lowerStrict, String upper, boolean upperStrict,
-        boolean alphaNumeric) {
-      super(type);
-      this.dimension = dimension;
-      this.lower = lower;
-      this.lowerStrict = lowerStrict;
-      this.upper = upper;
-      this.upperStrict = upperStrict;
-      this.alphaNumeric = alphaNumeric;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      generator.writeStringField("dimension", dimension);
-      if (lower != null) {
-        generator.writeStringField("lower", lower);
-        generator.writeBooleanField("lowerStrict", lowerStrict);
-      }
-      if (upper != null) {
-        generator.writeStringField("upper", upper);
-        generator.writeBooleanField("upperStrict", upperStrict);
-      }
-      generator.writeBooleanField("alphaNumeric", alphaNumeric);
-      generator.writeEndObject();
-    }
-  }
-
-  /** Filter that combines other filters using a boolean operator. */
-  private static class JsonCompositeFilter extends JsonFilter {
-    private final List<? extends JsonFilter> fields;
-
-    private JsonCompositeFilter(String type,
-        List<? extends JsonFilter> fields) {
-      super(type);
-      this.fields = fields;
-    }
-
-    public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", type);
-      switch (type) {
-      case "NOT":
-        writeField(generator, "field", fields.get(0));
-        break;
-      default:
-        writeField(generator, "fields", fields);
-      }
-      generator.writeEndObject();
-    }
-  }
-
-}
-
-// End DruidQuery.java
\ No newline at end of file
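
For reference, the removed getQuery() serialized the matched rel sequence
into one of four Druid JSON query types. Below is a standalone sketch of
the kind of document its TIMESERIES branch produced, using the same
Jackson JsonGenerator calls; the dataSource, aggregation, and interval
values are invented for illustration:

  import java.io.IOException;
  import java.io.StringWriter;

  import com.fasterxml.jackson.core.JsonFactory;
  import com.fasterxml.jackson.core.JsonGenerator;

  public class TimeseriesJsonSketch {
    public static void main(String[] args) throws IOException {
      StringWriter sw = new StringWriter();
      JsonGenerator generator = new JsonFactory().createGenerator(sw);

      generator.writeStartObject();
      generator.writeStringField("queryType", "timeseries");
      generator.writeStringField("dataSource", "wikipedia");
      generator.writeStringField("descending", "false");
      generator.writeStringField("granularity", "ALL");
      // filter/postAggregations were emitted only when non-null (writeFieldIf).
      generator.writeFieldName("aggregations");
      generator.writeStartArray();
      generator.writeStartObject();
      generator.writeStringField("type", "longSum");
      generator.writeStringField("name", "added");
      generator.writeStringField("fieldName", "added");
      generator.writeEndObject();
      generator.writeEndArray();
      generator.writeFieldName("intervals");
      generator.writeStartArray();
      generator.writeString("1900-01-01T00:00:00.000/3000-01-01T00:00:00.000");
      generator.writeEndArray();
      generator.writeEndObject();

      generator.close();
      System.out.println(sw);
    }
  }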

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQueryType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQueryType.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQueryType.java
deleted file mode 100644
index 228b307..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidQueryType.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-/**
- * Type of Druid query.
- *
- * TODO: to be removed when Calcite is upgraded to 1.9
- */
-public enum DruidQueryType {
-  SELECT("select"),
-  TOP_N("topN"),
-  GROUP_BY("groupBy"),
-  TIMESERIES("timeseries");
-
-  private final String queryName;
-
-  private DruidQueryType(String queryName) {
-    this.queryName = queryName;
-  }
-
-  public String getQueryName() {
-    return this.queryName;
-  }
-}
-
-// End QueryType.java
\ No newline at end of file
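
The enum removed above simply pairs each plan shape with the queryType
string Druid expects on the wire. A tiny self-contained sketch of that
contract:

  public class QueryTypeNames {
    enum DruidQueryType {
      SELECT("select"), TOP_N("topN"), GROUP_BY("groupBy"), TIMESERIES("timeseries");

      private final String queryName;

      DruidQueryType(String queryName) {
        this.queryName = queryName;
      }

      String getQueryName() {
        return queryName;
      }
    }

    public static void main(String[] args) {
      for (DruidQueryType t : DruidQueryType.values()) {
        System.out.println(t + " -> " + t.getQueryName());
      }
    }
  }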

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidRules.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidRules.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidRules.java
deleted file mode 100644
index f68ffa5..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidRules.java
+++ /dev/null
@@ -1,591 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelFieldCollation;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.rex.RexVisitorImpl;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateGranularity;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectSortTransposeRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortProjectTransposeRule;
-import org.joda.time.Interval;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-/**
- * Rules and relational operators for {@link DruidQuery}.
- *
- * TODO: to be removed when Calcite is upgraded to 1.9
- */
-public class DruidRules {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(DruidRules.class);
-
-  // Avoid instantiation
-  private DruidRules() {
-  }
-
-  public static final DruidFilterRule FILTER = new DruidFilterRule();
-  public static final DruidProjectRule PROJECT = new DruidProjectRule();
-  public static final DruidAggregateRule AGGREGATE = new DruidAggregateRule();
-  public static final DruidProjectAggregateRule PROJECT_AGGREGATE = new DruidProjectAggregateRule();
-  public static final DruidSortRule SORT = new DruidSortRule();
-  public static final DruidProjectSortRule PROJECT_SORT = new DruidProjectSortRule();
-  public static final DruidSortProjectRule SORT_PROJECT = new DruidSortProjectRule();
-
-  /** Predicate that returns whether Druid cannot handle an aggregate. */
-  private static final Predicate<AggregateCall> BAD_AGG = new Predicate<AggregateCall>() {
-    public boolean apply(AggregateCall aggregateCall) {
-      switch (aggregateCall.getAggregation().getKind()) {
-        case COUNT:
-        case SUM:
-        case SUM0:
-        case MIN:
-        case MAX:
-          return false;
-        default:
-          return true;
-      }
-    }
-  };
-
-  /**
-   * Rule to push a {@link org.apache.calcite.rel.core.Filter} into a {@link DruidQuery}.
-   */
-  private static class DruidFilterRule extends RelOptRule {
-    private DruidFilterRule() {
-      super(operand(Filter.class,
-              operand(DruidQuery.class, none())));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Filter filter = call.rel(0);
-      final DruidQuery query = call.rel(1);
-      if (!DruidQuery.isValidSignature(query.signature() + 'f')
-              || !query.isValidFilter(filter.getCondition())) {
-        return;
-      }
-      // Timestamp
-      int timestampFieldIdx = -1;
-      for (int i = 0; i < query.getRowType().getFieldCount(); i++) {
-        if (DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(
-                query.getRowType().getFieldList().get(i).getName())) {
-          timestampFieldIdx = i;
-          break;
-        }
-      }
-      final Pair<List<RexNode>, List<RexNode>> pair = splitFilters(
-              filter.getCluster().getRexBuilder(), query, filter.getCondition(), timestampFieldIdx);
-      if (pair == null) {
-        // We can't push anything useful to Druid.
-        return;
-      }
-      List<Interval> intervals = null;
-      if (!pair.left.isEmpty()) {
-        intervals = DruidIntervalUtils.createInterval(
-                query.getRowType().getFieldList().get(timestampFieldIdx).getType(),
-                pair.left);
-        if (intervals == null) {
-          // We can't push anything useful to Druid.
-          return;
-        }
-      }
-      DruidQuery newDruidQuery = query;
-      if (!pair.right.isEmpty()) {
-        if (!validConditions(pair.right)) {
-          return;
-        }
-        final RelNode newFilter = filter.copy(filter.getTraitSet(), Util.last(query.rels),
-                RexUtil.composeConjunction(filter.getCluster().getRexBuilder(), pair.right, false));
-        newDruidQuery = DruidQuery.extendQuery(query, newFilter);
-      }
-      if (intervals != null) {
-        newDruidQuery = DruidQuery.extendQuery(newDruidQuery, intervals);
-      }
-      call.transformTo(newDruidQuery);
-    }
-
-    /* Splits the filter condition into two groups: those that filter on the timestamp column
-     * and those that filter on other fields */
-    private static Pair<List<RexNode>, List<RexNode>> splitFilters(final RexBuilder rexBuilder,
-            final DruidQuery input, RexNode cond, final int timestampFieldIdx) {
-      final List<RexNode> timeRangeNodes = new ArrayList<>();
-      final List<RexNode> otherNodes = new ArrayList<>();
-      List<RexNode> conjs = RelOptUtil.conjunctions(cond);
-      if (conjs.isEmpty()) {
-        // We do not transform
-        return null;
-      }
-      // Number of columns with the dimensions and timestamp
-      int max = input.getRowType().getFieldCount() - input.druidTable.metricFieldNames.size();
-      for (RexNode conj : conjs) {
-        final RelOptUtil.InputReferencedVisitor visitor = new RelOptUtil.InputReferencedVisitor();
-        conj.accept(visitor);
-        if (visitor.inputPosReferenced.contains(timestampFieldIdx)) {
-          if (visitor.inputPosReferenced.size() != 1) {
-            // Complex predicate, transformation currently not supported
-            return null;
-          }
-          timeRangeNodes.add(conj);
-        } else if (!visitor.inputPosReferenced.tailSet(max).isEmpty()) {
-          // Filter on metrics, not supported in Druid
-          return null;
-        } else {
-          otherNodes.add(conj);
-        }
-      }
-      return Pair.of(timeRangeNodes, otherNodes);
-    }
-
-    /* Checks that all conditions are on ref + literal */
-    private static boolean validConditions(List<RexNode> nodes) {
-      for (RexNode node: nodes) {
-        try {
-          node.accept(
-              new RexVisitorImpl<Void>(true) {
-                @SuppressWarnings("incomplete-switch")
-                @Override public Void visitCall(RexCall call) {
-                  switch (call.getKind()) {
-                    case CAST:
-                      // Only if on top of ref or literal
-                      if (call.getOperands().get(0) instanceof RexInputRef ||
-                              call.getOperands().get(0) instanceof RexLiteral) {
-                        break;
-                      }
-                      // Not supported
-                      throw Util.FoundOne.NULL;
-                    case EQUALS:
-                    case LESS_THAN:
-                    case LESS_THAN_OR_EQUAL:
-                    case GREATER_THAN:
-                    case GREATER_THAN_OR_EQUAL:
-                      // Check cast
-                      RexNode left = call.getOperands().get(0);
-                      if (left.getKind() == SqlKind.CAST) {
-                        left = ((RexCall)left).getOperands().get(0);
-                      }
-                      RexNode right = call.getOperands().get(1);
-                      if (right.getKind() == SqlKind.CAST) {
-                        right = ((RexCall)right).getOperands().get(0);
-                      }
-                      if (left instanceof RexInputRef && right instanceof RexLiteral) {
-                        break;
-                      }
-                      if (right instanceof RexInputRef && left instanceof RexLiteral) {
-                        break;
-                      }
-                      // Not supported if it is not ref + literal
-                      throw Util.FoundOne.NULL;
-                    case BETWEEN:
-                    case IN:
-                      // Not supported here yet
-                      throw Util.FoundOne.NULL;
-                  }
-                  return super.visitCall(call);
-                }
-              });
-        } catch (Util.FoundOne e) {
-          return false;
-        }
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Rule to push a {@link org.apache.calcite.rel.core.Project} into a {@link DruidQuery}.
-   */
-  private static class DruidProjectRule extends RelOptRule {
-    private DruidProjectRule() {
-      super(operand(Project.class,
-              operand(DruidQuery.class, none())));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Project project = call.rel(0);
-      final DruidQuery query = call.rel(1);
-      if (!DruidQuery.isValidSignature(query.signature() + 'p')) {
-        return;
-      }
-
-      if (canProjectAll(project.getProjects())) {
-        // All expressions can be pushed to Druid in their entirety.
-        final RelNode newProject = project.copy(project.getTraitSet(),
-                ImmutableList.of(Util.last(query.rels)));
-        RelNode newNode = DruidQuery.extendQuery(query, newProject);
-        call.transformTo(newNode);
-        return;
-      }
-      final Pair<List<RexNode>, List<RexNode>> pair = splitProjects(
-              project.getCluster().getRexBuilder(), query, project.getProjects());
-      if (pair == null) {
-        // We can't push anything useful to Druid.
-        return;
-      }
-      final List<RexNode> above = pair.left;
-      final List<RexNode> below = pair.right;
-      final RelDataTypeFactory.FieldInfoBuilder builder = project.getCluster().getTypeFactory()
-              .builder();
-      final RelNode input = Util.last(query.rels);
-      for (RexNode e : below) {
-        final String name;
-        if (e instanceof RexInputRef) {
-          name = input.getRowType().getFieldNames().get(((RexInputRef) e).getIndex());
-        } else {
-          name = null;
-        }
-        builder.add(name, e.getType());
-      }
-      final RelNode newProject = project.copy(project.getTraitSet(), input, below, builder.build());
-      final DruidQuery newQuery = DruidQuery.extendQuery(query, newProject);
-      final RelNode newProject2 = project.copy(project.getTraitSet(), newQuery, above,
-              project.getRowType());
-      call.transformTo(newProject2);
-    }
-
-    private static boolean canProjectAll(List<RexNode> nodes) {
-      for (RexNode e : nodes) {
-        if (!(e instanceof RexInputRef)) {
-          return false;
-        }
-      }
-      return true;
-    }
-
-    private static Pair<List<RexNode>, List<RexNode>> splitProjects(final RexBuilder rexBuilder,
-            final RelNode input, List<RexNode> nodes) {
-      final RelOptUtil.InputReferencedVisitor visitor = new RelOptUtil.InputReferencedVisitor();
-      for (RexNode node : nodes) {
-        node.accept(visitor);
-      }
-      if (visitor.inputPosReferenced.size() == input.getRowType().getFieldCount()) {
-        // All inputs are referenced
-        return null;
-      }
-      final List<RexNode> belowNodes = new ArrayList<>();
-      final List<RelDataType> belowTypes = new ArrayList<>();
-      final List<Integer> positions = Lists.newArrayList(visitor.inputPosReferenced);
-      for (int i : positions) {
-        final RexNode node = rexBuilder.makeInputRef(input, i);
-        belowNodes.add(node);
-        belowTypes.add(node.getType());
-      }
-      final List<RexNode> aboveNodes = new ArrayList<>();
-      for (RexNode node : nodes) {
-        aboveNodes.add(node.accept(new RexShuttle() {
-          @Override
-          public RexNode visitInputRef(RexInputRef ref) {
-            final int index = positions.indexOf(ref.getIndex());
-            return rexBuilder.makeInputRef(belowTypes.get(index), index);
-          }
-        }));
-      }
-      return Pair.of(aboveNodes, belowNodes);
-    }
-  }
-
-  /**
-   * Rule to push an {@link org.apache.calcite.rel.core.Aggregate} into a {@link DruidQuery}.
-   */
-  private static class DruidAggregateRule extends RelOptRule {
-    private DruidAggregateRule() {
-      super(operand(Aggregate.class,
-              operand(DruidQuery.class, none())));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Aggregate aggregate = call.rel(0);
-      final DruidQuery query = call.rel(1);
-      if (!DruidQuery.isValidSignature(query.signature() + 'a')) {
-        return;
-      }
-      if (aggregate.indicator
-              || aggregate.getGroupSets().size() != 1
-              || Iterables.any(aggregate.getAggCallList(), BAD_AGG)
-              || !validAggregate(aggregate, query)) {
-        return;
-      }
-      final RelNode newAggregate = aggregate.copy(aggregate.getTraitSet(),
-              ImmutableList.of(Util.last(query.rels)));
-      call.transformTo(DruidQuery.extendQuery(query, newAggregate));
-    }
-
-    /* Check whether agg functions reference timestamp */
-    private static boolean validAggregate(Aggregate aggregate, DruidQuery query) {
-      ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
-      for (AggregateCall aggCall : aggregate.getAggCallList()) {
-        builder.addAll(aggCall.getArgList());
-      }
-      return !checkTimestampRefOnQuery(builder.build(), query.getTopNode());
-    }
-  }
-
-  /**
-   * Rule to push an {@link org.apache.calcite.rel.core.Aggregate} and
-   * {@link org.apache.calcite.rel.core.Project} into a {@link DruidQuery}.
-   */
-  private static class DruidProjectAggregateRule extends RelOptRule {
-    private DruidProjectAggregateRule() {
-      super(operand(Aggregate.class,
-              operand(Project.class,
-                      operand(DruidQuery.class, none()))));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Aggregate aggregate = call.rel(0);
-      final Project project = call.rel(1);
-      final DruidQuery query = call.rel(2);
-      if (!DruidQuery.isValidSignature(query.signature() + 'p' + 'a')) {
-        return;
-      }
-      int timestampIdx;
-      if ((timestampIdx = validProject(project, query)) == -1) {
-        return;
-      }
-      if (aggregate.indicator
-              || aggregate.getGroupSets().size() != 1
-              || Iterables.any(aggregate.getAggCallList(), BAD_AGG)
-              || !validAggregate(aggregate, timestampIdx)) {
-        return;
-      }
-
-      final RelNode newProject = project.copy(project.getTraitSet(),
-              ImmutableList.of(Util.last(query.rels)));
-      final DruidQuery projectDruidQuery = DruidQuery.extendQuery(query, newProject);
-      final RelNode newAggregate = aggregate.copy(aggregate.getTraitSet(),
-              ImmutableList.of(Util.last(projectDruidQuery.rels)));
-      call.transformTo(DruidQuery.extendQuery(projectDruidQuery, newAggregate));
-    }
-
-    /* To be a valid Project, it may only contain input references plus at most
-     * one call to an EXTRACT function on the timestamp column. Returns the index
-     * of the timestamp expression, or -1 if the Project does not qualify. */
-    private static int validProject(Project project, DruidQuery query) {
-      List<RexNode> nodes = project.getProjects();
-      int idxTimestamp = -1;
-      for (int i = 0; i < nodes.size(); i++) {
-        final RexNode e = nodes.get(i);
-        if (e instanceof RexCall) {
-          // It is a call, check that it is EXTRACT and follow-up conditions
-          final RexCall call = (RexCall) e;
-          if (!HiveDateGranularity.ALL_FUNCTIONS.contains(call.getOperator())) {
-            return -1;
-          }
-          if (idxTimestamp != -1) {
-            // Already one usage of timestamp column
-            return -1;
-          }
-          if (!(call.getOperands().get(0) instanceof RexInputRef)) {
-            return -1;
-          }
-          final RexInputRef ref = (RexInputRef) call.getOperands().get(0);
-          if (!(checkTimestampRefOnQuery(ImmutableBitSet.of(ref.getIndex()), query.getTopNode()))) {
-            return -1;
-          }
-          idxTimestamp = i;
-          continue;
-        }
-        if (!(e instanceof RexInputRef)) {
-          // It needs to be a reference
-          return -1;
-        }
-        final RexInputRef ref = (RexInputRef) e;
-        if (checkTimestampRefOnQuery(ImmutableBitSet.of(ref.getIndex()), query.getTopNode())) {
-          if (idxTimestamp != -1) {
-            // Already one usage of timestamp column
-            return -1;
-          }
-          idxTimestamp = i;
-        }
-      }
-      return idxTimestamp;
-    }
-
-    private static boolean validAggregate(Aggregate aggregate, int idx) {
-      if (!aggregate.getGroupSet().get(idx)) {
-        return false;
-      }
-      for (AggregateCall aggCall : aggregate.getAggCallList()) {
-        if (aggCall.getArgList().contains(idx)) {
-          return false;
-        }
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Rule to push an {@link org.apache.calcite.rel.core.Sort} through a
-   * {@link org.apache.calcite.rel.core.Project}. Useful to transform
-   * to complex Druid queries.
-   */
-  private static class DruidProjectSortRule extends HiveSortProjectTransposeRule {
-    private DruidProjectSortRule() {
-      super(operand(Sort.class,
-              operand(Project.class,
-                      operand(DruidQuery.class, none()))));
-    }
-
-    @Override
-    public boolean matches(RelOptRuleCall call) {
-      return true;
-    }
-
-  }
-
-  /**
-   * Rule to push back {@link org.apache.calcite.rel.core.Project} through a
-   * {@link org.apache.calcite.rel.core.Sort}. Useful if after pushing Sort,
-   * we could not push it inside DruidQuery.
-   */
-  private static class DruidSortProjectRule extends HiveProjectSortTransposeRule {
-    private DruidSortProjectRule() {
-      super(operand(Project.class,
-              operand(Sort.class,
-                      operand(DruidQuery.class, none()))));
-    }
-  }
-
-  /**
-   * Rule to push a {@link org.apache.calcite.rel.core.Sort} into a {@link DruidQuery}.
-   */
-  private static class DruidSortRule extends RelOptRule {
-    private DruidSortRule() {
-      super(operand(Sort.class,
-              operand(DruidQuery.class, none())));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Sort sort = call.rel(0);
-      final DruidQuery query = call.rel(1);
-      if (!DruidQuery.isValidSignature(query.signature() + 'l')) {
-        return;
-      }
-      // Either it is:
-      // - a sort without limit on the time column on top of an
-      //     Aggregate operator (transformable to a timeseries query), or
-      // - a sort without limit on columns that do not include
-      //     the time column, on top of an Aggregate operator, or
-      // - a simple limit on top of an operator other than Aggregate
-      if (!validSortLimit(sort, query)) {
-        return;
-      }
-      final RelNode newSort = sort.copy(sort.getTraitSet(),
-              ImmutableList.of(Util.last(query.rels)));
-      call.transformTo(DruidQuery.extendQuery(query, newSort));
-    }
-
-    /* Check whether the sort/limit can be pushed into the Druid query */
-    private static boolean validSortLimit(Sort sort, DruidQuery query) {
-      if (sort.offset != null && RexLiteral.intValue(sort.offset) != 0) {
-        // offset not supported by Druid
-        return false;
-      }
-      if (query.getTopNode() instanceof Aggregate) {
-        final Aggregate topAgg = (Aggregate) query.getTopNode();
-        final ImmutableBitSet.Builder positionsReferenced = ImmutableBitSet.builder();
-        int metricsRefs = 0;
-        for (RelFieldCollation col : sort.collation.getFieldCollations()) {
-          int idx = col.getFieldIndex();
-          if (idx >= topAgg.getGroupCount()) {
-            metricsRefs++;
-            continue;
-          }
-          positionsReferenced.set(topAgg.getGroupSet().nth(idx));
-        }
-        boolean refsTimestamp =
-                checkTimestampRefOnQuery(positionsReferenced.build(), topAgg.getInput());
-        if (refsTimestamp && metricsRefs != 0) {
-          return false;
-        }
-        return true;
-      }
-      // If it is going to be a Druid select operator, we push the limit iff
-      // 1) it does not contain a sort specification (required by Druid), and
-      // 2) the limit is smaller than the select threshold, as otherwise it
-      //   might be better to retain some parallelism and let the global
-      //   limit optimizer kick in
-      HiveDruidConf conf = sort.getCluster().getPlanner()
-              .getContext().unwrap(HiveDruidConf.class);
-      return HiveCalciteUtil.pureLimitRelNode(sort) &&
-              RexLiteral.intValue(sort.fetch) <= conf.getSelectThreshold();
-    }
-  }
-
-  /* Check whether any of the references lead to the timestamp column */
-  private static boolean checkTimestampRefOnQuery(ImmutableBitSet set, RelNode top) {
-    if (top instanceof Project) {
-      ImmutableBitSet.Builder newSet = ImmutableBitSet.builder();
-      final Project project = (Project) top;
-      for (int index : set) {
-        RexNode node = project.getProjects().get(index);
-        if (node instanceof RexInputRef) {
-          newSet.set(((RexInputRef)node).getIndex());
-        } else if (node instanceof RexCall) {
-          RexCall call = (RexCall) node;
-          assert HiveDateGranularity.ALL_FUNCTIONS.contains(call.getOperator());
-          newSet.set(((RexInputRef)call.getOperands().get(0)).getIndex());
-        }
-      }
-      top = project.getInput();
-      set = newSet.build();
-    }
-
-    // Check if any references the timestamp column
-    for (int index : set) {
-      if (DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(top.getRowType().getFieldNames().get(index))) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-}
-
-// End DruidRules.java
\ No newline at end of file

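The splitProjects method in the removed DruidRules above carries the core trick of the project rule: collect the distinct input positions the expressions reference, push a compact reference-only Project below, and rewrite the original expressions against the compacted positions. A minimal plain-Java sketch of that remapping, with a hypothetical InputRef record standing in for Calcite's RexInputRef (Java 16+):

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class SplitProjectsSketch {
  // Hypothetical stand-in for RexInputRef: an expression reading input field 'index'.
  record InputRef(int index) {}

  public static void main(String[] args) {
    // Three project expressions referencing input fields 5, 2 and 5 again.
    List<InputRef> projects = List.of(new InputRef(5), new InputRef(2), new InputRef(5));

    // 1. Distinct referenced positions, ascending (the InputReferencedVisitor's role).
    SortedSet<Integer> referenced = new TreeSet<>();
    projects.forEach(p -> referenced.add(p.index()));
    List<Integer> positions = new ArrayList<>(referenced);          // [2, 5]

    // 2. The "below" Project just reads those positions from the input.
    System.out.println("below reads input fields " + positions);

    // 3. The "above" Project re-targets each original reference to its offset
    //    in the compacted list, mirroring the RexShuttle in splitProjects.
    List<Integer> above = new ArrayList<>();
    for (InputRef p : projects) {
      above.add(positions.indexOf(p.index()));
    }
    System.out.println("above reads below fields " + above);        // [1, 0, 1]
  }
}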
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidSchema.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidSchema.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidSchema.java
deleted file mode 100644
index 3b3f68a..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidSchema.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-import java.util.Map;
-
-import org.apache.calcite.schema.Table;
-import org.apache.calcite.schema.impl.AbstractSchema;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
-/**
- * Schema mapped onto a Druid instance.
- *
- * TODO: to be removed when Calcite is upgraded to 1.9
- */
-public class DruidSchema extends AbstractSchema {
-  final String url;
-
-  /**
-   * Creates a Druid schema.
-   *
-   * @param url URL of query REST service
-   */
-  public DruidSchema(String url) {
-    this.url = Preconditions.checkNotNull(url);
-  }
-
-  @Override protected Map<String, Table> getTableMap() {
-    final ImmutableMap.Builder<String, Table> builder = ImmutableMap.builder();
-    return builder.build();
-  }
-}
-
-// End DruidSchema.java
\ No newline at end of file

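The removed DruidSchema above is deliberately hollow: its getTableMap() returns an empty map because table resolution happened on the Hive side. For contrast, a schema that actually exposes tables overrides the same AbstractSchema hook; a hedged sketch (the "events" name and the injected Table are hypothetical):

import java.util.Map;

import org.apache.calcite.schema.Table;
import org.apache.calcite.schema.impl.AbstractSchema;

import com.google.common.collect.ImmutableMap;

public class PopulatedSchemaSketch extends AbstractSchema {
  private final Table eventsTable;   // hypothetical, supplied by the caller

  public PopulatedSchemaSketch(Table eventsTable) {
    this.eventsTable = eventsTable;
  }

  @Override protected Map<String, Table> getTableMap() {
    // Unlike DruidSchema's empty builder, map a name to a concrete Table.
    return ImmutableMap.of("events", eventsTable);
  }
}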
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidTable.java
deleted file mode 100644
index 7288291..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidTable.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.calcite.interpreter.BindableConvention;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.logical.LogicalTableScan;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelProtoDataType;
-import org.apache.calcite.schema.TranslatableTable;
-import org.apache.calcite.schema.impl.AbstractTable;
-import org.joda.time.DateTime;
-import org.joda.time.Interval;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-
-/**
- * Table mapped onto a Druid table.
- *
- * TODO: to be removed when Calcite is upgraded to 1.9
- */
-public class DruidTable extends AbstractTable implements TranslatableTable {
-
-  public static final String DEFAULT_TIMESTAMP_COLUMN = "__time";
-  public static final Interval DEFAULT_INTERVAL = new Interval(
-          new DateTime("1900-01-01"),
-          new DateTime("3000-01-01")
-  );
-
-  final DruidSchema schema;
-  final String dataSource;
-  final RelDataType rowType;
-  final RelProtoDataType protoRowType;
-  final ImmutableSet<String> metricFieldNames;
-  final ImmutableList<Interval> intervals;
-  final String timestampFieldName;
-
-  /**
-   * Creates a Druid table.
-   *
-   * @param schema Druid schema that contains this table
-   * @param dataSource Druid data source name
-   * @param protoRowType Field names and types
-   * @param metricFieldNames Names of fields that are metrics
-   * @param intervals Default intervals if the query does not constrain the time
-   * @param timestampFieldName Name of the column that contains the time
-   */
-  public DruidTable(DruidSchema schema, String dataSource,
-      RelProtoDataType protoRowType, Set<String> metricFieldNames,
-      List<Interval> intervals, String timestampFieldName) {
-    this.schema = Preconditions.checkNotNull(schema);
-    this.dataSource = Preconditions.checkNotNull(dataSource);
-    this.rowType = null;
-    this.protoRowType = protoRowType;
-    this.metricFieldNames = ImmutableSet.copyOf(metricFieldNames);
-    this.intervals = ImmutableList.copyOf(intervals);
-    this.timestampFieldName = Preconditions.checkNotNull(timestampFieldName);
-  }
-
-  public DruidTable(DruidSchema schema, String dataSource,
-      RelDataType rowType, Set<String> metricFieldNames,
-      List<Interval> intervals, String timestampFieldName) {
-    this.schema = Preconditions.checkNotNull(schema);
-    this.dataSource = Preconditions.checkNotNull(dataSource);
-    this.rowType = Preconditions.checkNotNull(rowType);
-    this.protoRowType = null;
-    this.metricFieldNames = ImmutableSet.copyOf(metricFieldNames);
-    this.intervals = ImmutableList.copyOf(intervals);
-    this.timestampFieldName = Preconditions.checkNotNull(timestampFieldName);
-  }
-
-  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
-    final RelDataType thisRowType;
-    if (rowType != null) {
-      thisRowType = rowType;
-    } else {
-      // Generate the row type from the prototype
-      thisRowType = protoRowType.apply(typeFactory);
-    }
-    final List<String> fieldNames = thisRowType.getFieldNames();
-    Preconditions.checkArgument(fieldNames.contains(timestampFieldName));
-    Preconditions.checkArgument(fieldNames.containsAll(metricFieldNames));
-    return thisRowType;
-  }
-
-  public RelNode toRel(RelOptTable.ToRelContext context,
-      RelOptTable relOptTable) {
-    final RelOptCluster cluster = context.getCluster();
-    final TableScan scan = LogicalTableScan.create(cluster, relOptTable);
-    return DruidQuery.create(cluster,
-        cluster.traitSetOf(BindableConvention.INSTANCE), relOptTable, this,
-        ImmutableList.<RelNode>of(scan));
-  }
-
-}
-
-// End DruidTable.java
\ No newline at end of file

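Whichever constructor was used, DruidTable.getRowType() above ends with the same two invariants: the timestamp column and every metric column must appear in the resolved row type. The check reduced to plain Java (the field and metric names are made up):

import java.util.List;
import java.util.Set;

public class RowTypeInvariantSketch {
  public static void main(String[] args) {
    List<String> fieldNames = List.of("__time", "page", "added", "deleted");
    String timestampFieldName = "__time";
    Set<String> metricFieldNames = Set.of("added", "deleted");

    // The same pair of Preconditions.checkArgument calls from getRowType().
    if (!fieldNames.contains(timestampFieldName)) {
      throw new IllegalArgumentException("missing timestamp column " + timestampFieldName);
    }
    if (!fieldNames.containsAll(metricFieldNames)) {
      throw new IllegalArgumentException("missing metric columns " + metricFieldNames);
    }
    System.out.println("row type validated: " + fieldNames);
  }
}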
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/HiveDruidConf.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/HiveDruidConf.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/HiveDruidConf.java
deleted file mode 100644
index 0686dff..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/HiveDruidConf.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-public class HiveDruidConf {
-
-  private int selectThreshold;
-
-
-  public HiveDruidConf(int selectThreshold) {
-    this.selectThreshold = selectThreshold;
-  }
-
-  public int getSelectThreshold() {
-    return selectThreshold;
-  }
-
-}

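HiveDruidConf exists only to carry the select threshold that DruidSortRule.validSortLimit consulted above: a plain LIMIT is pushed to Druid only when it is small enough that giving up Hive-side parallelism is worthwhile. The decision itself is a single comparison (values here are invented):

public class SelectThresholdSketch {
  public static void main(String[] args) {
    int selectThreshold = 10000;   // hypothetical configured HiveDruidConf value
    int fetch = 500;               // the query's LIMIT

    // Mirrors validSortLimit's final check for pure-limit plans.
    boolean pushToDruid = fetch <= selectThreshold;
    System.out.println("push LIMIT " + fetch + " to Druid: " + pushToDruid);
  }
}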
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
index dc6b152..6df6026 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveAggregate.java
@@ -35,7 +35,6 @@ import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.IntList;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 
 import com.google.common.collect.Sets;
@@ -90,7 +89,7 @@ public class HiveAggregate extends Aggregate implements HiveRelNode {
       final RelDataType inputRowType, boolean indicator,
       ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets,
       final List<AggregateCall> aggCalls) {
-    final IntList groupList = groupSet.toList();
+    final List<Integer> groupList = groupSet.asList();
     assert groupList.size() == groupSet.cardinality();
     final RelDataTypeFactory.FieldInfoBuilder builder = typeFactory.builder();
     final List<RelDataTypeField> fieldList = inputRowType.getFieldList();

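The HiveAggregate hunk replaces Calcite's removed IntList with the List<Integer> view that ImmutableBitSet.asList() already provides; nothing else changes. The equivalent enumeration with a plain java.util.BitSet, for readers without Calcite at hand:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class GroupListSketch {
  public static void main(String[] args) {
    BitSet groupSet = new BitSet();
    groupSet.set(0);
    groupSet.set(3);

    // Same shape as groupSet.asList(): the set bits, in ascending order.
    List<Integer> groupList = new ArrayList<>();
    for (int i = groupSet.nextSetBit(0); i >= 0; i = groupSet.nextSetBit(i + 1)) {
      groupList.add(i);
    }

    // The assertion carried over from HiveAggregate (enable with -ea).
    assert groupList.size() == groupSet.cardinality();
    System.out.println(groupList);   // [0, 3]
  }
}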
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateGranularity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateGranularity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateGranularity.java
deleted file mode 100644
index b3f8d9b..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateGranularity.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-
-import java.util.Set;
-
-import org.apache.calcite.sql.SqlFunction;
-import org.apache.calcite.sql.SqlFunctionCategory;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.type.OperandTypes;
-import org.apache.calcite.sql.type.ReturnTypes;
-
-import com.google.common.collect.Sets;
-
-public class HiveDateGranularity extends SqlFunction {
-
-  public static final SqlFunction YEAR = new HiveDateGranularity("YEAR");
-  public static final SqlFunction QUARTER = new HiveDateGranularity("QUARTER");
-  public static final SqlFunction MONTH = new HiveDateGranularity("MONTH");
-  public static final SqlFunction WEEK = new HiveDateGranularity("WEEK");
-  public static final SqlFunction DAY = new HiveDateGranularity("DAY");
-  public static final SqlFunction HOUR = new HiveDateGranularity("HOUR");
-  public static final SqlFunction MINUTE = new HiveDateGranularity("MINUTE");
-  public static final SqlFunction SECOND = new HiveDateGranularity("SECOND");
-
-  public static final Set<SqlFunction> ALL_FUNCTIONS =
-          Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);
-
-  private HiveDateGranularity(String name) {
-    super(
-        name,
-        SqlKind.OTHER_FUNCTION,
-        ReturnTypes.TIME_NULLABLE,
-        null,
-        OperandTypes.ANY,
-        SqlFunctionCategory.TIMEDATE);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
new file mode 100644
index 0000000..4edc4df
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import java.util.Set;
+
+import org.apache.calcite.sql.SqlFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+import com.google.common.collect.Sets;
+
+public class HiveExtractDate extends SqlFunction {
+
+  public static final SqlFunction YEAR = new HiveExtractDate("YEAR");
+  public static final SqlFunction QUARTER = new HiveExtractDate("QUARTER");
+  public static final SqlFunction MONTH = new HiveExtractDate("MONTH");
+  public static final SqlFunction WEEK = new HiveExtractDate("WEEK");
+  public static final SqlFunction DAY = new HiveExtractDate("DAY");
+  public static final SqlFunction HOUR = new HiveExtractDate("HOUR");
+  public static final SqlFunction MINUTE = new HiveExtractDate("MINUTE");
+  public static final SqlFunction SECOND = new HiveExtractDate("SECOND");
+
+  public static final Set<SqlFunction> ALL_FUNCTIONS =
+          Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);
+
+  private HiveExtractDate(String name) {
+    super(name, SqlKind.EXTRACT, ReturnTypes.INTEGER_NULLABLE, null,
+            OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
+            SqlFunctionCategory.SYSTEM);
+  }
+
+}

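Unlike the removed HiveDateGranularity (SqlKind.OTHER_FUNCTION), every HiveExtractDate singleton above shares SqlKind.EXTRACT, so consumers can match on the kind instead of enumerating the eight instances. A plain-Java sketch of that kind-based dispatch (the Call record and names are hypothetical, Java 16+):

public class KindDispatchSketch {
  enum Kind { EXTRACT, FLOOR, OTHER_FUNCTION }

  // Hypothetical stand-in for a RexCall: an operator kind plus a time unit.
  record Call(Kind kind, String unit) {}

  static boolean isExtract(Call c) {
    // One check covers YEAR..SECOND alike, instead of ALL_FUNCTIONS.contains(...).
    return c.kind() == Kind.EXTRACT;
  }

  public static void main(String[] args) {
    System.out.println(isExtract(new Call(Kind.EXTRACT, "DAY")));          // true
    System.out.println(isExtract(new Call(Kind.OTHER_FUNCTION, "DAY")));   // false
  }
}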
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFloorDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFloorDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFloorDate.java
new file mode 100644
index 0000000..3d104ef
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFloorDate.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import java.util.Set;
+
+import org.apache.calcite.sql.SqlFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperatorBinding;
+import org.apache.calcite.sql.fun.SqlMonotonicUnaryFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.validate.SqlMonotonicity;
+
+import com.google.common.collect.Sets;
+
+public class HiveFloorDate extends SqlMonotonicUnaryFunction {
+
+  public static final SqlFunction YEAR = new HiveFloorDate("FLOOR_YEAR");
+  public static final SqlFunction QUARTER = new HiveFloorDate("FLOOR_QUARTER");
+  public static final SqlFunction MONTH = new HiveFloorDate("FLOOR_MONTH");
+  public static final SqlFunction WEEK = new HiveFloorDate("FLOOR_WEEK");
+  public static final SqlFunction DAY = new HiveFloorDate("FLOOR_DAY");
+  public static final SqlFunction HOUR = new HiveFloorDate("FLOOR_HOUR");
+  public static final SqlFunction MINUTE = new HiveFloorDate("FLOOR_MINUTE");
+  public static final SqlFunction SECOND = new HiveFloorDate("FLOOR_SECOND");
+
+  public static final Set<SqlFunction> ALL_FUNCTIONS =
+          Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);
+
+  private HiveFloorDate(String name) {
+    super(name, SqlKind.FLOOR, ReturnTypes.ARG0_OR_EXACT_NO_SCALE, null,
+        OperandTypes.sequence(
+            "'" + SqlKind.FLOOR + "(<DATE> TO <TIME_UNIT>)'\n"
+            + "'" + SqlKind.FLOOR + "(<TIME> TO <TIME_UNIT>)'\n"
+            + "'" + SqlKind.FLOOR + "(<TIMESTAMP> TO <TIME_UNIT>)'",
+            OperandTypes.DATETIME,
+            OperandTypes.ANY),
+        SqlFunctionCategory.NUMERIC);
+  }
+
+  @Override
+  public SqlMonotonicity getMonotonicity(SqlOperatorBinding call) {
+    // Monotonic iff its first argument is, but not strict.
+    return call.getOperandMonotonicity(0).unstrict();
+  }
+
+}

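The getMonotonicity() override above says FLOOR inherits its argument's direction but loses strictness: two increasing timestamps can floor to the same value. A small demonstration with java.time, approximating FLOOR(ts TO DAY) with toLocalDate():

import java.time.LocalDate;
import java.time.LocalDateTime;

public class FloorMonotonicitySketch {
  public static void main(String[] args) {
    LocalDateTime a = LocalDateTime.of(2016, 10, 18, 9, 0);
    LocalDateTime b = LocalDateTime.of(2016, 10, 18, 17, 0);   // a < b

    LocalDate fa = a.toLocalDate();   // "floor to day"
    LocalDate fb = b.toLocalDate();

    // Non-decreasing (monotonic) but not strictly increasing: fa equals fb,
    // which is exactly why getMonotonicity() applies unstrict().
    System.out.println(!fa.isAfter(fb));   // true
    System.out.println(fa.equals(fb));     // true
  }
}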
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
index e9a4d88..87e755c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
@@ -133,9 +133,10 @@ public class HiveAggregateJoinTransposeRule extends AggregateJoinTransposeRule {
     // Split join condition
     final List<Integer> leftKeys = Lists.newArrayList();
     final List<Integer> rightKeys = Lists.newArrayList();
+    final List<Boolean> filterNulls = Lists.newArrayList();
     RexNode nonEquiConj =
         RelOptUtil.splitJoinCondition(join.getLeft(), join.getRight(),
-            join.getCondition(), leftKeys, rightKeys);
+            join.getCondition(), leftKeys, rightKeys, filterNulls);
     // If it contains non-equi join conditions, we bail out
     if (!nonEquiConj.isAlwaysTrue()) {
       return;
@@ -271,7 +272,8 @@ public class HiveAggregateJoinTransposeRule extends AggregateJoinTransposeRule {
       RelOptUtil.areRowTypesEqual(r.getRowType(), aggregate.getRowType(), false)) {
       // no need to aggregate
     } else {
-      r = RelOptUtil.createProject(r, projects, null, true, projectFactory);
+      r = RelOptUtil.createProject(r, projects, null, true,
+              relBuilderFactory.create(aggregate.getCluster(), null));
       if (allColumnsInAggregate) {
         // let's see if we can convert
         List<RexNode> projects2 = new ArrayList<>();
@@ -290,7 +292,8 @@ public class HiveAggregateJoinTransposeRule extends AggregateJoinTransposeRule {
         if (projects2.size()
             == aggregate.getGroupSet().cardinality() + newAggCalls.size()) {
           // We successfully converted agg calls into projects.
-          r = RelOptUtil.createProject(r, projects2, null, true, projectFactory);
+          r = RelOptUtil.createProject(r, projects2, null, true,
+                  relBuilderFactory.create(aggregate.getCluster(), null));
           break b;
         }
       }

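The first hunk above moves to the splitJoinCondition overload that also reports, per extracted key pair, whether the predicate filters NULLs (a plain '=') or matches them (IS NOT DISTINCT FROM). The extra bookkeeping is just a parallel list; a sketch of its shape with invented key positions:

import java.util.ArrayList;
import java.util.List;

public class FilterNullsSketch {
  public static void main(String[] args) {
    List<Integer> leftKeys = new ArrayList<>();
    List<Integer> rightKeys = new ArrayList<>();
    List<Boolean> filterNulls = new ArrayList<>();

    // Pretend the condition was: l.c0 = r.c0 AND l.c1 IS NOT DISTINCT FROM r.c1
    leftKeys.add(0); rightKeys.add(0); filterNulls.add(true);    // '=' drops NULL keys
    leftKeys.add(1); rightKeys.add(1); filterNulls.add(false);   // null-safe equality

    for (int i = 0; i < leftKeys.size(); i++) {
      System.out.printf("l.c%d ~ r.c%d  nullFiltering=%b%n",
          leftKeys.get(i), rightKeys.get(i), filterNulls.get(i));
    }
  }
}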
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
index 8af8a0d..c243266 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
@@ -141,7 +141,8 @@ public class HiveAggregateProjectMergeRule extends RelOptRule {
            i < newAggregate.getRowType().getFieldCount(); i++) {
         posList.add(i);
       }
-      rel = HiveRelOptUtil.createProject(HiveRelFactories.HIVE_PROJECT_FACTORY,
+      rel = HiveRelOptUtil.createProject(
+          HiveRelFactories.HIVE_BUILDER.create(aggregate.getCluster(), null),
           rel, posList);
 
     }

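Both of the last two hunks make the same mechanical substitution: createProject no longer takes a shared project factory but a RelBuilder freshly created for the node's cluster. Stripped of Calcite specifics, the shape of the change (all names are generic stand-ins):

public class BuilderPerCallSketch {
  interface Builder { /* builds relational expressions for one cluster */ }

  interface BuilderFactory {
    // Mirrors RelBuilderFactory.create(cluster, schema): context in, builder out.
    Builder create(Object cluster, Object schema);
  }

  static Object createProject(Builder b, Object input, int[] positions) {
    return input;   // placeholder: the real method emits a Project over 'input'
  }

  public static void main(String[] args) {
    BuilderFactory factory = (cluster, schema) -> new Builder() {};
    Object cluster = new Object();   // stands in for aggregate.getCluster()

    // Before: createProject(sharedProjectFactory, input, positions)
    // After:  a builder instantiated at the call site with the node's context.
    Object rel = createProject(factory.create(cluster, null), new Object(), new int[]{0, 1});
    System.out.println(rel != null);
  }
}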

[62/62] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
HIVE-14671 : merge master into hive-14535 (Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3f34134a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3f34134a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3f34134a

Branch: refs/heads/hive-14535
Commit: 3f34134a66c866d0999634030f00b7f68827376b
Parents: b6571ea 57044c4
Author: Wei Zheng <we...@apache.org>
Authored: Tue Oct 18 17:15:18 2016 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Tue Oct 18 17:15:18 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hive/common/util/DateUtils.java  |    20 -
 .../druid/HiveDruidQueryBasedInputFormat.java   |     6 +-
 .../serde/DruidGroupByQueryRecordReader.java    |     2 +-
 .../serde/DruidSelectQueryRecordReader.java     |     2 +-
 .../hadoop/hive/druid/serde/DruidSerDe.java     |     2 +-
 .../serde/DruidTimeseriesQueryRecordReader.java |     2 +-
 .../druid/serde/DruidTopNQueryRecordReader.java |     2 +-
 .../TestJdbcWithSQLAuthUDFBlacklist.java        |     2 +-
 .../test/resources/testconfiguration.properties |     5 +-
 .../hadoop/hive/cli/control/CliConfigs.java     |     2 +-
 .../apache/hive/jdbc/HiveQueryResultSet.java    |     5 -
 pom.xml                                         |     3 +-
 ql/pom.xml                                      |     7 +-
 .../ColumnArithmeticColumn.txt                  |     7 +-
 .../ColumnArithmeticColumnDecimal.txt           |     5 -
 .../ColumnArithmeticColumnWithConvert.txt       |   173 +
 .../ColumnArithmeticScalar.txt                  |     5 -
 .../ColumnArithmeticScalarDecimal.txt           |     5 -
 .../ColumnArithmeticScalarWithConvert.txt       |   150 +
 .../ExpressionTemplates/ColumnCompareColumn.txt |     5 -
 .../ExpressionTemplates/ColumnCompareScalar.txt |     5 -
 .../ExpressionTemplates/ColumnDivideColumn.txt  |     5 -
 .../ColumnDivideColumnDecimal.txt               |     5 -
 .../ExpressionTemplates/ColumnDivideScalar.txt  |     5 -
 .../ColumnDivideScalarDecimal.txt               |     5 -
 .../ExpressionTemplates/ColumnUnaryFunc.txt     |     5 -
 .../ExpressionTemplates/ColumnUnaryMinus.txt    |     5 -
 ...eColumnArithmeticIntervalYearMonthColumn.txt |     5 -
 ...eColumnArithmeticIntervalYearMonthScalar.txt |     5 -
 .../DateColumnArithmeticTimestampColumn.txt     |     5 -
 .../DateColumnArithmeticTimestampScalar.txt     |     5 -
 ...eScalarArithmeticIntervalYearMonthColumn.txt |     5 -
 .../DateScalarArithmeticTimestampColumn.txt     |     5 -
 .../DecimalColumnUnaryFunc.txt                  |     5 -
 .../ExpressionTemplates/FilterColumnBetween.txt |     7 +-
 .../FilterColumnCompareColumn.txt               |     9 +-
 .../FilterColumnCompareScalar.txt               |     9 +-
 .../FilterDecimalColumnBetween.txt              |     5 -
 .../FilterDecimalColumnCompareDecimalColumn.txt |     5 -
 .../FilterDecimalColumnCompareDecimalScalar.txt |     5 -
 .../FilterDecimalScalarCompareDecimalColumn.txt |     5 -
 ...erLongDoubleColumnCompareTimestampColumn.txt |     5 -
 ...erLongDoubleScalarCompareTimestampColumn.txt |     5 -
 .../FilterScalarCompareColumn.txt               |     9 +-
 .../FilterStringColumnBetween.txt               |     9 +-
 ...tringGroupColumnCompareStringGroupColumn.txt |     5 -
 ...gGroupColumnCompareStringGroupScalarBase.txt |     7 -
 ...gGroupScalarCompareStringGroupColumnBase.txt |     8 -
 .../FilterTimestampColumnBetween.txt            |     5 -
 ...erTimestampColumnCompareLongDoubleColumn.txt |     5 -
 ...erTimestampColumnCompareLongDoubleScalar.txt |     5 -
 ...terTimestampColumnCompareTimestampColumn.txt |     5 -
 ...terTimestampColumnCompareTimestampScalar.txt |     5 -
 ...erTimestampScalarCompareLongDoubleColumn.txt |     5 -
 ...terTimestampScalarCompareTimestampColumn.txt |     5 -
 .../FilterTruncStringColumnBetween.txt          |    10 +-
 .../ExpressionTemplates/IfExprColumnScalar.txt  |     5 -
 .../ExpressionTemplates/IfExprScalarColumn.txt  |     5 -
 .../ExpressionTemplates/IfExprScalarScalar.txt  |     5 -
 ...ervalYearMonthColumnArithmeticDateColumn.txt |     5 -
 ...ervalYearMonthColumnArithmeticDateScalar.txt |     5 -
 ...YearMonthColumnArithmeticTimestampColumn.txt |     5 -
 ...YearMonthColumnArithmeticTimestampScalar.txt |     5 -
 ...ervalYearMonthScalarArithmeticDateColumn.txt |     5 -
 ...YearMonthScalarArithmeticTimestampColumn.txt |     5 -
 .../LongDoubleColumnCompareTimestampColumn.txt  |     5 -
 .../LongDoubleColumnCompareTimestampScalar.txt  |     4 -
 .../LongDoubleScalarCompareTimestampColumn.txt  |     5 -
 .../ScalarArithmeticColumn.txt                  |     5 -
 .../ScalarArithmeticColumnDecimal.txt           |     5 -
 .../ScalarArithmeticColumnWithConvert.txt       |   163 +
 .../ExpressionTemplates/ScalarCompareColumn.txt |     5 -
 .../ExpressionTemplates/ScalarDivideColumn.txt  |     5 -
 .../ScalarDivideColumnDecimal.txt               |     5 -
 ...tringGroupColumnCompareStringGroupColumn.txt |     5 -
 ...gGroupColumnCompareStringGroupScalarBase.txt |     6 -
 ...tringGroupColumnCompareTruncStringScalar.txt |     7 -
 ...gGroupScalarCompareStringGroupColumnBase.txt |     7 -
 .../TimestampColumnArithmeticDateColumn.txt     |     5 -
 .../TimestampColumnArithmeticDateScalar.txt     |     5 -
 ...pColumnArithmeticIntervalYearMonthColumn.txt |     5 -
 ...pColumnArithmeticIntervalYearMonthScalar.txt |     5 -
 ...TimestampColumnArithmeticTimestampColumn.txt |     5 -
 ...TimestampColumnArithmeticTimestampScalar.txt |     5 -
 .../TimestampColumnCompareLongDoubleColumn.txt  |     5 -
 .../TimestampColumnCompareLongDoubleScalar.txt  |     5 -
 .../TimestampColumnCompareTimestampColumn.txt   |     5 -
 .../TimestampColumnCompareTimestampScalar.txt   |     5 -
 .../TimestampScalarArithmeticDateColumn.txt     |     5 -
 ...pScalarArithmeticIntervalYearMonthColumn.txt |     5 -
 ...TimestampScalarArithmeticTimestampColumn.txt |     5 -
 .../TimestampScalarCompareTimestampColumn.txt   |     5 -
 ...runcStringScalarCompareStringGroupColumn.txt |     2 -
 .../UDAFTemplates/VectorUDAFAvg.txt             |     6 -
 .../UDAFTemplates/VectorUDAFMinMax.txt          |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |     6 -
 .../VectorUDAFMinMaxIntervalDayTime.txt         |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |     6 -
 .../UDAFTemplates/VectorUDAFSum.txt             |     6 -
 .../UDAFTemplates/VectorUDAFVar.txt             |     6 -
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |     6 -
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   252 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |     2 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |     5 -
 .../hadoop/hive/ql/exec/SelectOperator.java     |     1 +
 .../hive/ql/exec/persistence/MapJoinKey.java    |     9 +-
 .../hive/ql/exec/spark/HashTableLoader.java     |     2 +-
 .../ql/exec/vector/VectorColumnMapping.java     |    12 +-
 .../ql/exec/vector/VectorColumnOrderedMap.java  |    33 +-
 .../exec/vector/VectorColumnOutputMapping.java  |     7 +-
 .../exec/vector/VectorColumnSourceMapping.java  |     7 +-
 .../hive/ql/exec/vector/VectorCopyRow.java      |     3 +-
 .../ql/exec/vector/VectorFilterOperator.java    |     4 +-
 .../ql/exec/vector/VectorGroupByOperator.java   |    21 +-
 .../ql/exec/vector/VectorSelectOperator.java    |    33 +-
 .../ql/exec/vector/VectorizationContext.java    |    38 +-
 .../exec/vector/VectorizationContextRegion.java |     4 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |     3 -
 ...AbstractFilterStringColLikeStringScalar.java |     5 -
 .../CastBooleanToCharViaLongToChar.java         |     5 -
 .../CastBooleanToVarCharViaLongToVarChar.java   |     5 -
 .../exec/vector/expressions/CastDateToChar.java |     4 -
 .../vector/expressions/CastDateToVarChar.java   |     5 -
 .../vector/expressions/CastDecimalToChar.java   |     5 -
 .../expressions/CastDecimalToDecimal.java       |     5 -
 .../expressions/CastDecimalToVarChar.java       |     5 -
 .../expressions/CastDoubleToTimestamp.java      |     5 -
 .../exec/vector/expressions/CastLongToChar.java |     5 -
 .../exec/vector/expressions/CastLongToDate.java |     5 -
 .../vector/expressions/CastLongToTimestamp.java |     5 -
 .../vector/expressions/CastLongToVarChar.java   |     5 -
 .../CastMillisecondsLongToTimestamp.java        |     5 -
 .../expressions/CastStringGroupToChar.java      |     4 -
 .../expressions/CastStringGroupToVarChar.java   |     5 -
 .../vector/expressions/CastStringToDate.java    |     5 -
 .../vector/expressions/CastStringToDecimal.java |     5 -
 .../CastStringToIntervalDayTime.java            |     5 -
 .../CastStringToIntervalYearMonth.java          |     5 -
 .../expressions/CastTimestampToBoolean.java     |     7 +-
 .../expressions/CastTimestampToDouble.java      |     5 -
 .../vector/expressions/CastTimestampToLong.java |     7 +-
 .../ql/exec/vector/expressions/ColAndCol.java   |     5 -
 .../ql/exec/vector/expressions/ColOrCol.java    |     5 -
 .../expressions/ConstantVectorExpression.java   |    33 -
 .../expressions/DateColSubtractDateColumn.java  |     4 -
 .../expressions/DateColSubtractDateScalar.java  |     5 -
 .../DateScalarSubtractDateColumn.java           |     5 -
 .../vector/expressions/DecimalColumnInList.java |     7 -
 .../expressions/DecimalToStringUnaryUDF.java    |     5 -
 .../vector/expressions/DoubleColumnInList.java  |     7 -
 .../vector/expressions/FilterColAndScalar.java  |     5 -
 .../vector/expressions/FilterColOrScalar.java   |     5 -
 .../expressions/FilterDecimalColumnInList.java  |     7 -
 .../expressions/FilterDoubleColumnInList.java   |     6 -
 .../vector/expressions/FilterExprAndExpr.java   |     8 -
 .../vector/expressions/FilterExprOrExpr.java    |     6 -
 .../expressions/FilterLongColumnInList.java     |     7 -
 .../expressions/FilterScalarAndColumn.java      |     5 -
 .../expressions/FilterScalarOrColumn.java       |     5 -
 .../expressions/FilterStringColumnInList.java   |     7 -
 .../expressions/FilterStructColumnInList.java   |     9 -
 .../FilterTimestampColumnInList.java            |     7 -
 .../vector/expressions/FuncDecimalToDouble.java |     5 -
 .../vector/expressions/FuncDecimalToLong.java   |     5 -
 .../expressions/FuncDecimalToTimestamp.java     |     5 -
 .../vector/expressions/FuncDoubleToDecimal.java |     4 -
 .../vector/expressions/FuncLongToDecimal.java   |     4 -
 .../vector/expressions/FuncLongToString.java    |     4 -
 .../FuncRoundWithNumDigitsDecimalToDecimal.java |     4 -
 .../expressions/FuncTimestampToDecimal.java     |     5 -
 .../vector/expressions/FuncTimestampToLong.java |     5 -
 .../vector/expressions/IdentityExpression.java  |     5 -
 .../IfExprDoubleColumnDoubleColumn.java         |     5 -
 .../IfExprIntervalDayTimeColumnColumn.java      |     5 -
 .../IfExprIntervalDayTimeColumnScalar.java      |     5 -
 .../IfExprIntervalDayTimeScalarColumn.java      |     5 -
 .../IfExprIntervalDayTimeScalarScalar.java      |     5 -
 .../expressions/IfExprLongColumnLongColumn.java |     5 -
 ...fExprStringGroupColumnStringGroupColumn.java |     5 -
 .../IfExprStringGroupColumnStringScalar.java    |     7 -
 .../IfExprStringScalarStringGroupColumn.java    |     7 -
 .../IfExprStringScalarStringScalar.java         |     7 -
 .../IfExprTimestampColumnColumnBase.java        |     5 -
 .../IfExprTimestampColumnScalarBase.java        |     6 -
 .../IfExprTimestampScalarColumnBase.java        |     6 -
 .../IfExprTimestampScalarScalarBase.java        |     6 -
 .../ql/exec/vector/expressions/IsNotNull.java   |     5 -
 .../hive/ql/exec/vector/expressions/IsNull.java |     5 -
 .../expressions/LongColDivideLongColumn.java    |     5 -
 .../expressions/LongColDivideLongScalar.java    |     5 -
 .../expressions/LongColEqualLongColumn.java     |     5 -
 .../expressions/LongColEqualLongScalar.java     |     4 -
 .../LongColGreaterEqualLongColumn.java          |     5 -
 .../LongColGreaterEqualLongScalar.java          |     5 -
 .../expressions/LongColGreaterLongColumn.java   |     5 -
 .../expressions/LongColGreaterLongScalar.java   |     5 -
 .../expressions/LongColLessEqualLongColumn.java |     5 -
 .../expressions/LongColLessEqualLongScalar.java |     5 -
 .../expressions/LongColLessLongColumn.java      |     5 -
 .../expressions/LongColLessLongScalar.java      |     5 -
 .../expressions/LongColNotEqualLongColumn.java  |     5 -
 .../expressions/LongColNotEqualLongScalar.java  |     5 -
 .../vector/expressions/LongColumnInList.java    |     6 -
 .../expressions/LongScalarDivideLongColumn.java |     5 -
 .../expressions/LongScalarEqualLongColumn.java  |     4 -
 .../LongScalarGreaterEqualLongColumn.java       |     5 -
 .../LongScalarGreaterLongColumn.java            |     5 -
 .../LongScalarLessEqualLongColumn.java          |     4 -
 .../expressions/LongScalarLessLongColumn.java   |     5 -
 .../LongScalarNotEqualLongColumn.java           |     5 -
 .../expressions/LongToStringUnaryUDF.java       |     5 -
 .../expressions/MathFuncDoubleToDouble.java     |     7 +-
 .../expressions/MathFuncLongToDouble.java       |     5 -
 .../vector/expressions/MathFuncLongToLong.java  |     7 +-
 .../hive/ql/exec/vector/expressions/NotCol.java |     5 -
 .../expressions/PosModDoubleToDouble.java       |     5 -
 .../vector/expressions/PosModLongToLong.java    |     5 -
 .../RoundWithNumDigitsDoubleToDouble.java       |     5 -
 .../vector/expressions/SelectColumnIsFalse.java |     4 -
 .../expressions/SelectColumnIsNotNull.java      |     5 -
 .../vector/expressions/SelectColumnIsNull.java  |     5 -
 .../vector/expressions/SelectColumnIsTrue.java  |     5 -
 .../SelectStringColLikeStringScalar.java        |    10 +-
 .../vector/expressions/StringColumnInList.java  |     5 -
 .../StringGroupColConcatStringScalar.java       |     7 -
 .../expressions/StringGroupConcatColCol.java    |     5 -
 .../exec/vector/expressions/StringLength.java   |     4 -
 .../StringScalarConcatStringGroupCol.java       |     7 -
 .../expressions/StringSubstrColStart.java       |     5 -
 .../expressions/StringSubstrColStartLen.java    |     5 -
 .../exec/vector/expressions/StringUnaryUDF.java |     5 -
 .../expressions/StringUnaryUDFDirect.java       |     4 -
 .../vector/expressions/StructColumnInList.java  |     8 -
 .../expressions/TimestampColumnInList.java      |     6 -
 .../expressions/TimestampToStringUnaryUDF.java  |     4 -
 .../exec/vector/expressions/VectorCoalesce.java |     5 -
 .../ql/exec/vector/expressions/VectorElt.java   |     7 -
 .../vector/expressions/VectorExpression.java    |    44 +-
 .../expressions/VectorUDFDateAddColCol.java     |     5 -
 .../expressions/VectorUDFDateAddColScalar.java  |     5 -
 .../expressions/VectorUDFDateAddScalarCol.java  |     5 -
 .../expressions/VectorUDFDateDiffColCol.java    |     5 -
 .../expressions/VectorUDFDateDiffColScalar.java |     6 -
 .../expressions/VectorUDFDateDiffScalarCol.java |     5 -
 .../VectorUDFTimestampFieldDate.java            |    10 -
 .../VectorUDFTimestampFieldString.java          |     9 -
 .../VectorUDFTimestampFieldTimestamp.java       |     9 -
 .../aggregates/VectorAggregateExpression.java   |    19 -
 .../aggregates/VectorUDAFAvgDecimal.java        |     6 -
 .../aggregates/VectorUDAFAvgTimestamp.java      |     6 -
 .../expressions/aggregates/VectorUDAFCount.java |     6 -
 .../aggregates/VectorUDAFCountMerge.java        |     6 -
 .../aggregates/VectorUDAFCountStar.java         |     7 -
 .../aggregates/VectorUDAFStdPopTimestamp.java   |     6 -
 .../aggregates/VectorUDAFStdSampTimestamp.java  |     6 -
 .../aggregates/VectorUDAFSumDecimal.java        |     6 -
 .../aggregates/VectorUDAFVarPopTimestamp.java   |     6 -
 .../aggregates/VectorUDAFVarSampTimestamp.java  |     6 -
 .../mapjoin/VectorMapJoinCommonOperator.java    |   363 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |    11 +-
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |    15 +-
 ...VectorMapJoinInnerBigOnlyStringOperator.java |    11 +-
 .../mapjoin/VectorMapJoinInnerLongOperator.java |    11 +-
 .../VectorMapJoinInnerMultiKeyOperator.java     |    15 +-
 .../VectorMapJoinInnerStringOperator.java       |    11 +-
 .../VectorMapJoinLeftSemiLongOperator.java      |    11 +-
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |    15 +-
 .../VectorMapJoinLeftSemiStringOperator.java    |    11 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |    11 +-
 .../VectorMapJoinOuterMultiKeyOperator.java     |    15 +-
 .../VectorMapJoinOuterStringOperator.java       |    11 +-
 .../fast/VectorMapJoinFastTableContainer.java   |     2 +-
 .../VectorMapJoinOptimizedCreateHashTable.java  |     2 +-
 .../VectorReduceSinkCommonOperator.java         |     2 +-
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |     5 -
 .../calcite/HiveDefaultRelMetadataProvider.java |     2 +-
 .../optimizer/calcite/HivePlannerContext.java   |     9 +-
 .../ql/optimizer/calcite/HiveRelBuilder.java    |    18 +-
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |     8 +-
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  |   821 --
 .../optimizer/calcite/HiveTypeSystemImpl.java   |    39 +-
 .../calcite/cost/HiveDefaultCostModel.java      |     7 +-
 .../optimizer/calcite/cost/HiveRelMdCost.java   |    10 +-
 .../calcite/druid/DruidIntervalUtils.java       |   466 -
 .../ql/optimizer/calcite/druid/DruidQuery.java  |  1053 --
 .../optimizer/calcite/druid/DruidQueryType.java |    42 -
 .../ql/optimizer/calcite/druid/DruidRules.java  |   591 -
 .../ql/optimizer/calcite/druid/DruidSchema.java |    51 -
 .../ql/optimizer/calcite/druid/DruidTable.java  |   121 -
 .../optimizer/calcite/druid/HiveDruidConf.java  |    33 -
 .../calcite/reloperators/HiveAggregate.java     |     3 +-
 .../reloperators/HiveDateGranularity.java       |    54 -
 .../calcite/reloperators/HiveExtractDate.java   |    50 +
 .../calcite/reloperators/HiveFloorDate.java     |    64 +
 .../rules/HiveAggregateJoinTransposeRule.java   |     9 +-
 .../rules/HiveAggregateProjectMergeRule.java    |     3 +-
 .../rules/HiveFilterProjectTSTransposeRule.java |    16 +-
 .../rules/HiveFilterProjectTransposeRule.java   |    21 +-
 .../calcite/rules/HivePreFilteringRule.java     |     7 +-
 .../rules/HiveReduceExpressionsRule.java        |   914 +-
 .../HiveReduceExpressionsWithStatsRule.java     |     5 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |   243 +-
 .../calcite/stats/HiveRelMdCollation.java       |    10 +-
 .../calcite/stats/HiveRelMdDistribution.java    |    10 +-
 .../calcite/stats/HiveRelMdPredicates.java      |    31 +-
 .../calcite/stats/HiveRelMdSelectivity.java     |    28 +-
 .../optimizer/calcite/stats/HiveRelMdSize.java  |    13 +-
 .../calcite/stats/HiveRelMdUniqueKeys.java      |    72 +-
 .../calcite/translator/ASTBuilder.java          |    49 +-
 .../calcite/translator/ASTConverter.java        |    51 +-
 .../calcite/translator/ExprNodeConverter.java   |    49 +-
 .../translator/PlanModifierForASTConv.java      |     5 +
 .../calcite/translator/RexNodeConverter.java    |    61 +-
 .../translator/SqlFunctionConverter.java        |    37 +-
 .../calcite/translator/TypeConverter.java       |    41 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |  1458 +--
 .../ql/optimizer/physical/VectorizerReason.java |   123 -
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    40 +-
 .../hive/ql/parse/ExplainConfiguration.java     |    39 -
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    38 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     5 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    28 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |     6 -
 .../hive/ql/parse/TypeCheckProcFactory.java     |     2 +-
 .../hive/ql/plan/AbstractOperatorDesc.java      |    12 -
 .../hadoop/hive/ql/plan/AbstractVectorDesc.java |    14 -
 .../hadoop/hive/ql/plan/AppMasterEventDesc.java |    24 -
 .../apache/hadoop/hive/ql/plan/BaseWork.java    |   197 +-
 .../org/apache/hadoop/hive/ql/plan/Explain.java |    29 -
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |    13 -
 .../apache/hadoop/hive/ql/plan/FetchWork.java   |    45 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |    18 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |    28 -
 .../apache/hadoop/hive/ql/plan/GroupByDesc.java |    84 +-
 .../hadoop/hive/ql/plan/HashTableSinkDesc.java  |     1 -
 .../apache/hadoop/hive/ql/plan/LimitDesc.java   |    18 -
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |   218 +-
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |    99 +-
 .../hadoop/hive/ql/plan/MapredLocalWork.java    |     4 +-
 .../apache/hadoop/hive/ql/plan/MapredWork.java  |    11 +-
 .../ql/plan/OperatorExplainVectorization.java   |    85 -
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |   119 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |    92 +-
 .../apache/hadoop/hive/ql/plan/SelectDesc.java  |    35 -
 .../hive/ql/plan/SparkHashTableSinkDesc.java    |    25 -
 .../apache/hadoop/hive/ql/plan/SparkWork.java   |     6 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |    26 -
 .../org/apache/hadoop/hive/ql/plan/TezWork.java |    11 +-
 .../hive/ql/plan/VectorAppMasterEventDesc.java  |    35 -
 .../apache/hadoop/hive/ql/plan/VectorDesc.java  |     5 -
 .../hadoop/hive/ql/plan/VectorFileSinkDesc.java |    35 -
 .../hadoop/hive/ql/plan/VectorFilterDesc.java   |    48 -
 .../hadoop/hive/ql/plan/VectorGroupByDesc.java  |    31 -
 .../hadoop/hive/ql/plan/VectorLimitDesc.java    |    35 -
 .../hadoop/hive/ql/plan/VectorMapJoinDesc.java  |   110 -
 .../hadoop/hive/ql/plan/VectorMapJoinInfo.java  |   169 -
 .../hive/ql/plan/VectorReduceSinkDesc.java      |    68 -
 .../hadoop/hive/ql/plan/VectorSMBJoinDesc.java  |    35 -
 .../hadoop/hive/ql/plan/VectorSelectDesc.java   |    56 -
 .../ql/plan/VectorSparkHashTableSinkDesc.java   |    35 -
 .../hive/ql/plan/VectorTableScanDesc.java       |    45 -
 .../hive/ql/plan/VectorizationCondition.java    |    76 -
 .../exec/vector/TestVectorFilterOperator.java   |    15 +-
 .../exec/vector/TestVectorGroupByOperator.java  |    90 +-
 .../exec/vector/TestVectorSelectOperator.java   |    16 -
 .../optimizer/calcite/TestCBOMaxNumToCNF.java   |     5 +-
 .../calcite/TestCBORuleFiredOnlyOnce.java       |     2 +-
 .../ql/optimizer/physical/TestVectorizer.java   |    16 +-
 ...umn_names_with_leading_and_trailing_spaces.q |    15 -
 .../clientpositive/schema_evol_orc_vec_part.q   |    20 +-
 .../schema_evol_orc_vec_part_all_complex.q      |     8 +-
 .../schema_evol_orc_vec_part_all_primitive.q    |    12 +-
 .../clientpositive/schema_evol_orc_vec_table.q  |    12 +-
 .../clientpositive/schema_evol_text_vec_part.q  |    20 +-
 .../schema_evol_text_vec_part_all_complex.q     |     8 +-
 .../schema_evol_text_vec_part_all_primitive.q   |    12 +-
 .../clientpositive/schema_evol_text_vec_table.q |    12 +-
 .../schema_evol_text_vecrow_part.q              |    20 +-
 .../schema_evol_text_vecrow_part_all_complex.q  |     8 +-
 ...schema_evol_text_vecrow_part_all_primitive.q |    12 +-
 .../schema_evol_text_vecrow_table.q             |    12 +-
 .../clientpositive/vector_adaptor_usage_mode.q  |    24 +-
 .../queries/clientpositive/vector_aggregate_9.q |     3 +-
 .../vector_aggregate_without_gby.q              |     4 +-
 .../clientpositive/vector_auto_smb_mapjoin_14.q |    30 +-
 .../clientpositive/vector_between_columns.q     |     4 +-
 .../queries/clientpositive/vector_between_in.q  |    25 +-
 .../clientpositive/vector_binary_join_groupby.q |     7 +-
 .../test/queries/clientpositive/vector_bround.q |     4 +-
 .../test/queries/clientpositive/vector_bucket.q |     3 +-
 .../clientpositive/vector_cast_constant.q       |     4 +-
 .../test/queries/clientpositive/vector_char_2.q |     6 +-
 .../test/queries/clientpositive/vector_char_4.q |     3 +-
 .../queries/clientpositive/vector_char_cast.q   |     2 -
 .../clientpositive/vector_char_mapjoin1.q       |     6 +-
 .../queries/clientpositive/vector_char_simple.q |     8 +-
 .../queries/clientpositive/vector_coalesce.q    |    13 +-
 .../queries/clientpositive/vector_coalesce_2.q  |     8 +-
 .../queries/clientpositive/vector_complex_all.q |     6 +-
 .../clientpositive/vector_complex_join.q        |     4 +-
 .../test/queries/clientpositive/vector_count.q  |     8 +-
 .../clientpositive/vector_count_distinct.q      |     3 +-
 .../queries/clientpositive/vector_data_types.q  |     5 +-
 .../test/queries/clientpositive/vector_date_1.q |     3 +-
 .../queries/clientpositive/vector_decimal_1.q   |     2 +-
 .../clientpositive/vector_decimal_10_0.q        |     2 +-
 .../queries/clientpositive/vector_decimal_2.q   |     2 +-
 .../queries/clientpositive/vector_decimal_3.q   |     2 +-
 .../queries/clientpositive/vector_decimal_4.q   |     2 +-
 .../queries/clientpositive/vector_decimal_5.q   |     2 +-
 .../queries/clientpositive/vector_decimal_6.q   |     2 +-
 .../clientpositive/vector_decimal_aggregate.q   |     6 +-
 .../clientpositive/vector_decimal_cast.q        |     3 +-
 .../clientpositive/vector_decimal_expressions.q |     3 +-
 .../clientpositive/vector_decimal_mapjoin.q     |     3 +-
 .../clientpositive/vector_decimal_math_funcs.q  |     4 +-
 .../clientpositive/vector_decimal_precision.q   |     4 +-
 .../clientpositive/vector_decimal_round.q       |    14 +-
 .../clientpositive/vector_decimal_round_2.q     |    10 +-
 .../clientpositive/vector_decimal_trailing.q    |     2 +-
 .../clientpositive/vector_decimal_udf2.q        |     6 +-
 .../queries/clientpositive/vector_distinct_2.q  |     3 +-
 ql/src/test/queries/clientpositive/vector_elt.q |     5 +-
 .../queries/clientpositive/vector_empty_where.q |     8 +-
 .../queries/clientpositive/vector_groupby4.q    |     2 +-
 .../queries/clientpositive/vector_groupby6.q    |     2 +-
 .../queries/clientpositive/vector_groupby_3.q   |     3 +-
 .../clientpositive/vector_groupby_mapjoin.q     |     4 +-
 .../clientpositive/vector_groupby_reduce.q      |     9 +-
 .../clientpositive/vector_grouping_sets.q       |     4 +-
 .../queries/clientpositive/vector_if_expr.q     |     4 +-
 .../clientpositive/vector_include_no_sel.q      |     3 +-
 .../queries/clientpositive/vector_inner_join.q  |    19 +-
 .../queries/clientpositive/vector_interval_1.q  |    19 +-
 .../queries/clientpositive/vector_interval_2.q  |    22 +-
 .../clientpositive/vector_interval_arithmetic.q |    16 +-
 .../clientpositive/vector_interval_mapjoin.q    |     3 +-
 .../test/queries/clientpositive/vector_join.q   |     1 -
 .../test/queries/clientpositive/vector_join30.q |    16 +-
 .../clientpositive/vector_join_part_col_char.q  |     3 +-
 .../clientpositive/vector_left_outer_join.q     |     4 +-
 .../clientpositive/vector_left_outer_join2.q    |    12 +-
 .../clientpositive/vector_leftsemi_mapjoin.q    |   361 +-
 .../clientpositive/vector_mapjoin_reduce.q      |     5 +-
 .../vector_mr_diff_schema_alias.q               |     3 +-
 .../clientpositive/vector_multi_insert.q        |     4 +-
 .../vector_non_constant_in_expr.q               |     2 +-
 .../vector_non_string_partition.q               |     5 +-
 .../clientpositive/vector_null_projection.q     |     4 +-
 .../clientpositive/vector_nullsafe_join.q       |    21 +-
 .../vector_number_compare_projection.q          |     4 +-
 ql/src/test/queries/clientpositive/vector_nvl.q |     9 +-
 .../queries/clientpositive/vector_orderby_5.q   |     3 +-
 .../queries/clientpositive/vector_outer_join0.q |     5 +-
 .../queries/clientpositive/vector_outer_join1.q |     7 +-
 .../queries/clientpositive/vector_outer_join2.q |     3 +-
 .../queries/clientpositive/vector_outer_join3.q |     7 +-
 .../queries/clientpositive/vector_outer_join4.q |     7 +-
 .../queries/clientpositive/vector_outer_join5.q |    21 +-
 .../queries/clientpositive/vector_outer_join6.q |     5 +-
 .../vector_partition_diff_num_cols.q            |    12 +-
 .../vector_partitioned_date_time.q              |    18 +-
 .../vector_partitioned_date_time_win.q          |    16 +-
 .../queries/clientpositive/vector_reduce1.q     |     3 +-
 .../queries/clientpositive/vector_reduce2.q     |     3 +-
 .../queries/clientpositive/vector_reduce3.q     |     3 +-
 .../vector_reduce_groupby_decimal.q             |     4 +-
 .../clientpositive/vector_string_concat.q       |     5 +-
 .../clientpositive/vector_string_decimal.q      |     2 +-
 .../queries/clientpositive/vector_struct_in.q   |    19 +-
 .../clientpositive/vector_tablesample_rows.q    |     8 +-
 .../test/queries/clientpositive/vector_udf2.q   |     2 +-
 .../test/queries/clientpositive/vector_udf3.q   |     3 +-
 .../queries/clientpositive/vector_varchar_4.q   |     3 +-
 .../clientpositive/vector_varchar_mapjoin1.q    |     6 +-
 .../clientpositive/vector_varchar_simple.q      |     8 +-
 .../clientpositive/vector_when_case_null.q      |     2 +-
 .../queries/clientpositive/vectorization_0.q    |    21 +-
 .../queries/clientpositive/vectorization_1.q    |     1 -
 .../queries/clientpositive/vectorization_10.q   |     1 -
 .../queries/clientpositive/vectorization_11.q   |     1 -
 .../queries/clientpositive/vectorization_12.q   |     1 -
 .../queries/clientpositive/vectorization_13.q   |     6 +-
 .../queries/clientpositive/vectorization_14.q   |     3 +-
 .../queries/clientpositive/vectorization_15.q   |     3 +-
 .../queries/clientpositive/vectorization_16.q   |     3 +-
 .../queries/clientpositive/vectorization_17.q   |     3 +-
 .../queries/clientpositive/vectorization_2.q    |     1 -
 .../queries/clientpositive/vectorization_3.q    |     1 -
 .../queries/clientpositive/vectorization_4.q    |     1 -
 .../queries/clientpositive/vectorization_5.q    |     1 -
 .../queries/clientpositive/vectorization_6.q    |     1 -
 .../queries/clientpositive/vectorization_7.q    |     6 +-
 .../queries/clientpositive/vectorization_8.q    |     6 +-
 .../queries/clientpositive/vectorization_9.q    |     3 +-
 .../clientpositive/vectorization_decimal_date.q |     4 +-
 .../queries/clientpositive/vectorization_div0.q |     7 +-
 .../clientpositive/vectorization_limit.q        |    16 +-
 .../clientpositive/vectorization_nested_udf.q   |     2 -
 .../queries/clientpositive/vectorization_not.q  |     2 -
 .../clientpositive/vectorization_offset_limit.q |     5 +-
 .../queries/clientpositive/vectorization_part.q |     2 -
 .../clientpositive/vectorization_part_project.q |     4 +-
 .../clientpositive/vectorization_part_varchar.q |     2 -
 .../clientpositive/vectorization_pushdown.q     |     4 +-
 .../vectorization_short_regress.q               |    54 +-
 .../clientpositive/vectorized_bucketmapjoin1.q  |     8 +-
 .../queries/clientpositive/vectorized_case.q    |     4 +-
 .../queries/clientpositive/vectorized_casts.q   |     2 +-
 .../queries/clientpositive/vectorized_context.q |     4 +-
 .../clientpositive/vectorized_date_funcs.q      |    11 +-
 .../clientpositive/vectorized_distinct_gby.q    |     5 +-
 .../vectorized_dynamic_partition_pruning.q      |    79 +-
 .../queries/clientpositive/vectorized_mapjoin.q |     3 +-
 .../clientpositive/vectorized_mapjoin2.q        |     2 +-
 .../clientpositive/vectorized_math_funcs.q      |     3 +-
 .../clientpositive/vectorized_nested_mapjoin.q  |     3 +-
 .../queries/clientpositive/vectorized_parquet.q |     4 +-
 .../clientpositive/vectorized_parquet_types.q   |     6 +-
 .../queries/clientpositive/vectorized_ptf.q     |    47 +-
 .../clientpositive/vectorized_shufflejoin.q     |     3 +-
 .../clientpositive/vectorized_string_funcs.q    |     3 +-
 .../clientpositive/vectorized_timestamp.q       |     8 +-
 .../clientpositive/vectorized_timestamp_funcs.q |    15 +-
 .../vectorized_timestamp_ints_casts.q           |     5 +-
 .../results/clientpositive/druid_basic2.q.out   |    48 +-
 .../clientpositive/druid_intervals.q.out        |    40 +-
 .../clientpositive/druid_timeseries.q.out       |    52 +-
 .../results/clientpositive/druid_topn.q.out     |    32 +-
 .../clientpositive/explain_logical.q.out        |    48 +-
 .../clientpositive/groupby_sort_1_23.q.out      |    40 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |    40 +-
 .../results/clientpositive/limit_pushdown.q.out |    12 +-
 .../clientpositive/limit_pushdown3.q.out        |    12 +-
 .../clientpositive/llap/explainuser_4.q.out     |    32 +-
 .../clientpositive/llap/limit_pushdown.q.out    |     9 +-
 .../results/clientpositive/llap/lineage3.q.out  |     2 +-
 .../llap/schema_evol_orc_vec_part.q.out         |   666 +-
 .../schema_evol_orc_vec_part_all_complex.q.out  |   168 +-
 ...schema_evol_orc_vec_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_orc_vec_table.q.out        |   365 +-
 .../llap/schema_evol_text_vec_part.q.out        |   666 +-
 .../schema_evol_text_vec_part_all_complex.q.out |   168 +-
 ...chema_evol_text_vec_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_text_vec_table.q.out       |   365 +-
 .../llap/schema_evol_text_vecrow_part.q.out     |   666 +-
 ...hema_evol_text_vecrow_part_all_complex.q.out |   168 +-
 ...ma_evol_text_vecrow_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_text_vecrow_table.q.out    |   365 +-
 .../llap/table_access_keys_stats.q.out          |     6 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |    42 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |    42 +-
 .../llap/vector_aggregate_9.q.out               |    35 +-
 .../llap/vector_aggregate_without_gby.q.out     |     4 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |  1973 +--
 .../llap/vector_between_columns.q.out           |   115 +-
 .../clientpositive/llap/vector_between_in.q.out |   600 +-
 .../llap/vector_binary_join_groupby.q.out       |   152 +-
 .../clientpositive/llap/vector_bround.q.out     |    15 +-
 .../clientpositive/llap/vector_bucket.q.out     |    27 +-
 .../llap/vector_cast_constant.q.out             |    53 +-
 .../clientpositive/llap/vector_char_2.q.out     |   144 +-
 .../clientpositive/llap/vector_char_4.q.out     |    27 +-
 .../llap/vector_char_mapjoin1.q.out             |   220 +-
 .../llap/vector_char_simple.q.out               |   209 +-
 .../clientpositive/llap/vector_coalesce.q.out   |   473 +-
 .../clientpositive/llap/vector_coalesce_2.q.out |   100 +-
 .../llap/vector_complex_all.q.out               |   106 +-
 .../llap/vector_complex_join.q.out              |    40 +-
 .../clientpositive/llap/vector_count.q.out      |   146 +-
 .../llap/vector_count_distinct.q.out            |    73 +-
 .../clientpositive/llap/vector_data_types.q.out |    53 +-
 .../llap/vector_decimal_aggregate.q.out         |    95 +-
 .../llap/vector_decimal_cast.q.out              |    77 +-
 .../llap/vector_decimal_expressions.q.out       |    50 +-
 .../llap/vector_decimal_mapjoin.q.out           |    59 +-
 .../llap/vector_decimal_math_funcs.q.out        |    69 +-
 .../llap/vector_decimal_precision.q.out         |    35 +-
 .../llap/vector_decimal_round.q.out             |   189 +-
 .../llap/vector_decimal_round_2.q.out           |   173 +-
 .../llap/vector_decimal_udf2.q.out              |    62 +-
 .../clientpositive/llap/vector_distinct_2.q.out |    53 +-
 .../clientpositive/llap/vector_elt.q.out        |   145 +-
 .../clientpositive/llap/vector_groupby4.q.out   |    62 +-
 .../clientpositive/llap/vector_groupby6.q.out   |    62 +-
 .../clientpositive/llap/vector_groupby_3.q.out  |    55 +-
 .../llap/vector_groupby_mapjoin.q.out           |   238 +-
 .../llap/vector_groupby_reduce.q.out            |   286 +-
 .../llap/vector_grouping_sets.q.out             |    53 +-
 .../clientpositive/llap/vector_if_expr.q.out    |    47 +-
 .../llap/vector_include_no_sel.q.out            |    75 +-
 .../clientpositive/llap/vector_inner_join.q.out |   686 +-
 .../clientpositive/llap/vector_interval_1.q.out |   347 +-
 .../clientpositive/llap/vector_interval_2.q.out |   448 +-
 .../llap/vector_interval_arithmetic.q.out       |   334 +-
 .../llap/vector_interval_mapjoin.q.out          |    63 +-
 .../clientpositive/llap/vector_join30.q.out     |   907 +-
 .../llap/vector_join_part_col_char.q.out        |     4 +-
 .../llap/vector_left_outer_join.q.out           |    39 +-
 .../llap/vector_left_outer_join2.q.out          |   230 +-
 .../llap/vector_leftsemi_mapjoin.q.out          | 11096 +++++------------
 .../llap/vector_mapjoin_reduce.q.out            |    88 +-
 .../llap/vector_mr_diff_schema_alias.q.out      |    44 +-
 .../llap/vector_multi_insert.q.out              |    16 +-
 .../llap/vector_null_projection.q.out           |    39 +-
 .../llap/vector_nullsafe_join.q.out             |   628 +-
 .../llap/vector_number_compare_projection.q.out |    68 +-
 .../clientpositive/llap/vector_nvl.q.out        |   265 +-
 .../clientpositive/llap/vector_orderby_5.q.out  |    67 +-
 .../llap/vector_outer_join0.q.out               |   132 +-
 .../llap/vector_outer_join1.q.out               |   252 +-
 .../llap/vector_outer_join2.q.out               |   123 +-
 .../llap/vector_outer_join3.q.out               |   327 +-
 .../llap/vector_outer_join4.q.out               |   242 +-
 .../llap/vector_outer_join5.q.out               |   914 +-
 .../llap/vector_outer_join6.q.out               |   199 +-
 .../llap/vector_partition_diff_num_cols.q.out   |   250 +-
 .../llap/vector_partitioned_date_time.q.out     |  1075 +-
 .../clientpositive/llap/vector_reduce1.q.out    |    42 +-
 .../clientpositive/llap/vector_reduce2.q.out    |    42 +-
 .../clientpositive/llap/vector_reduce3.q.out    |    42 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |    71 +-
 .../llap/vector_string_concat.q.out             |   138 +-
 .../clientpositive/llap/vector_struct_in.q.out  |   568 +-
 .../clientpositive/llap/vector_varchar_4.q.out  |    27 +-
 .../llap/vector_varchar_mapjoin1.q.out          |    93 +-
 .../llap/vector_varchar_simple.q.out            |    99 +-
 .../llap/vector_when_case_null.q.out            |    52 +-
 .../clientpositive/llap/vectorization_0.q.out   |   519 +-
 .../clientpositive/llap/vectorization_13.q.out  |   114 +-
 .../clientpositive/llap/vectorization_14.q.out  |    28 +-
 .../clientpositive/llap/vectorization_15.q.out  |    28 +-
 .../clientpositive/llap/vectorization_16.q.out  |    21 +-
 .../clientpositive/llap/vectorization_17.q.out  |    23 +-
 .../clientpositive/llap/vectorization_7.q.out   |   100 +-
 .../clientpositive/llap/vectorization_8.q.out   |   100 +-
 .../clientpositive/llap/vectorization_9.q.out   |    21 +-
 .../llap/vectorization_decimal_date.q.out       |    76 +-
 .../llap/vectorization_part_project.q.out       |    23 +-
 .../llap/vectorization_pushdown.q.out           |    21 +-
 .../llap/vectorization_short_regress.q.out      |  1002 +-
 .../llap/vectorized_bucketmapjoin1.q.out        |   108 +-
 .../clientpositive/llap/vectorized_case.q.out   |    62 +-
 .../clientpositive/llap/vectorized_casts.q.out  |    16 +-
 .../llap/vectorized_context.q.out               |    32 +-
 .../llap/vectorized_date_funcs.q.out            |   314 +-
 .../llap/vectorized_distinct_gby.q.out          |    51 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  1159 +-
 .../llap/vectorized_mapjoin.q.out               |    71 +-
 .../llap/vectorized_math_funcs.q.out            |    69 +-
 .../llap/vectorized_nested_mapjoin.q.out        |    39 +-
 .../llap/vectorized_parquet.q.out               |    25 +-
 .../llap/vectorized_parquet_types.q.out         |    12 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |   668 +-
 .../llap/vectorized_shufflejoin.q.out           |    73 +-
 .../llap/vectorized_string_funcs.q.out          |    54 +-
 .../llap/vectorized_timestamp.q.out             |    16 +-
 .../llap/vectorized_timestamp_funcs.q.out       |   292 +-
 .../llap/vectorized_timestamp_ints_casts.q.out  |   138 +-
 .../offset_limit_ppd_optimizer.q.out            |    12 +-
 .../results/clientpositive/perf/query75.q.out   |    12 +-
 .../spark/groupby_sort_1_23.q.out               |    32 +-
 .../spark/groupby_sort_skew_1_23.q.out          |    32 +-
 .../clientpositive/spark/limit_pushdown.q.out   |     9 +-
 .../spark/table_access_keys_stats.q.out         |     6 +-
 .../spark/vector_between_in.q.out               |   604 +-
 .../spark/vector_cast_constant.q.out            |    53 +-
 .../clientpositive/spark/vector_char_4.q.out    |    27 +-
 .../spark/vector_count_distinct.q.out           |    74 +-
 .../spark/vector_data_types.q.out               |    53 +-
 .../spark/vector_decimal_aggregate.q.out        |    96 +-
 .../spark/vector_decimal_mapjoin.q.out          |    58 +-
 .../spark/vector_distinct_2.q.out               |    54 +-
 .../clientpositive/spark/vector_elt.q.out       |   143 +-
 .../clientpositive/spark/vector_groupby_3.q.out |    56 +-
 .../spark/vector_inner_join.q.out               |   678 +-
 .../spark/vector_left_outer_join.q.out          |    39 +-
 .../spark/vector_mapjoin_reduce.q.out           |    72 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    68 +-
 .../spark/vector_outer_join0.q.out              |   130 +-
 .../spark/vector_outer_join1.q.out              |   248 +-
 .../spark/vector_outer_join2.q.out              |   121 +-
 .../spark/vector_outer_join3.q.out              |   342 +-
 .../spark/vector_outer_join4.q.out              |   254 +-
 .../spark/vector_outer_join5.q.out              |   968 +-
 .../spark/vector_string_concat.q.out            |   137 +-
 .../clientpositive/spark/vector_varchar_4.q.out |    27 +-
 .../clientpositive/spark/vectorization_0.q.out  |   519 +-
 .../clientpositive/spark/vectorization_13.q.out |   114 +-
 .../clientpositive/spark/vectorization_14.q.out |    28 +-
 .../clientpositive/spark/vectorization_15.q.out |    28 +-
 .../clientpositive/spark/vectorization_16.q.out |    21 +-
 .../clientpositive/spark/vectorization_17.q.out |    23 +-
 .../clientpositive/spark/vectorization_7.q.out  |   118 +-
 .../clientpositive/spark/vectorization_8.q.out  |   146 +-
 .../clientpositive/spark/vectorization_9.q.out  |    21 +-
 .../spark/vectorization_decimal_date.q.out      |    75 +-
 .../spark/vectorization_div0.q.out              |   167 +-
 .../spark/vectorization_part_project.q.out      |    23 +-
 .../spark/vectorization_pushdown.q.out          |    21 +-
 .../spark/vectorization_short_regress.q.out     |  1002 +-
 .../spark/vectorized_bucketmapjoin1.q.out       |   135 +-
 .../clientpositive/spark/vectorized_case.q.out  |    62 +-
 .../spark/vectorized_mapjoin.q.out              |    70 +-
 .../spark/vectorized_math_funcs.q.out           |    68 +-
 .../spark/vectorized_nested_mapjoin.q.out       |    39 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |   693 +-
 .../spark/vectorized_shufflejoin.q.out          |    80 +-
 .../spark/vectorized_string_funcs.q.out         |    53 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   292 +-
 .../clientpositive/tez/explainanalyze_4.q.out   |    32 +-
 .../clientpositive/tez/explainanalyze_5.q.out   |     6 +-
 ...names_with_leading_and_trailing_spaces.q.out |    65 -
 .../clientpositive/tez/unionDistinct_2.q.out    |     6 +-
 .../tez/vector_join_part_col_char.q.out         |    44 +-
 .../tez/vector_non_string_partition.q.out       |    98 +-
 .../clientpositive/tez/vectorization_div0.q.out |   167 +-
 .../tez/vectorization_limit.q.out               |   348 +-
 .../vector_adaptor_usage_mode.q.out             |   230 +-
 .../clientpositive/vector_aggregate_9.q.out     |    34 +-
 .../vector_aggregate_without_gby.q.out          |    20 +-
 .../vector_auto_smb_mapjoin_14.q.out            |   604 +-
 .../clientpositive/vector_between_columns.q.out |    75 +-
 .../clientpositive/vector_between_in.q.out      |   466 +-
 .../vector_binary_join_groupby.q.out            |    96 +-
 .../results/clientpositive/vector_bround.q.out  |    16 +-
 .../results/clientpositive/vector_bucket.q.out  |    16 +-
 .../clientpositive/vector_cast_constant.q.out   |    43 +-
 .../results/clientpositive/vector_char_2.q.out  |    96 +-
 .../results/clientpositive/vector_char_4.q.out  |    27 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |   123 +-
 .../clientpositive/vector_char_simple.q.out     |   180 +-
 .../clientpositive/vector_coalesce.q.out        |   394 +-
 .../clientpositive/vector_coalesce_2.q.out      |    83 +-
 .../clientpositive/vector_complex_all.q.out     |    84 +-
 .../clientpositive/vector_complex_join.q.out    |    28 +-
 .../results/clientpositive/vector_count.q.out   |   142 +-
 .../clientpositive/vector_count_distinct.q.out  |    39 +-
 .../clientpositive/vector_data_types.q.out      |    40 +-
 .../vector_decimal_aggregate.q.out              |    74 +-
 .../clientpositive/vector_decimal_cast.q.out    |    34 +-
 .../vector_decimal_expressions.q.out            |    37 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |    36 +-
 .../vector_decimal_math_funcs.q.out             |    31 +-
 .../vector_decimal_precision.q.out              |    34 +-
 .../clientpositive/vector_decimal_round.q.out   |   129 +-
 .../clientpositive/vector_decimal_round_2.q.out |   132 +-
 .../clientpositive/vector_decimal_udf2.q.out    |    62 +-
 .../clientpositive/vector_distinct_2.q.out      |    38 +-
 .../results/clientpositive/vector_elt.q.out     |   101 +-
 .../clientpositive/vector_empty_where.q.out     |   168 +-
 .../clientpositive/vector_groupby4.q.out        |    41 +-
 .../clientpositive/vector_groupby6.q.out        |    41 +-
 .../clientpositive/vector_groupby_3.q.out       |    39 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |    24 +-
 .../clientpositive/vector_groupby_reduce.q.out  |   178 +-
 .../clientpositive/vector_grouping_sets.q.out   |    36 +-
 .../results/clientpositive/vector_if_expr.q.out |    37 +-
 .../clientpositive/vector_include_no_sel.q.out  |    47 +-
 .../clientpositive/vector_inner_join.q.out      |   396 +-
 .../clientpositive/vector_interval_1.q.out      |   264 +-
 .../clientpositive/vector_interval_2.q.out      |   348 +-
 .../vector_interval_arithmetic.q.out            |   260 +-
 .../vector_interval_mapjoin.q.out               |    40 +-
 .../results/clientpositive/vector_join30.q.out  |   700 +-
 .../vector_join_part_col_char.q.out             |    15 +-
 .../clientpositive/vector_left_outer_join.q.out |    20 +-
 .../vector_left_outer_join2.q.out               |   156 +-
 .../vector_leftsemi_mapjoin.q.out               |  6989 +++++------
 .../clientpositive/vector_mapjoin_reduce.q.out  |    56 +-
 .../vector_mr_diff_schema_alias.q.out           |    38 +-
 .../clientpositive/vector_multi_insert.q.out    |    16 +-
 .../vector_non_constant_in_expr.q.out           |    14 +-
 .../vector_non_string_partition.q.out           |    72 +-
 .../clientpositive/vector_null_projection.q.out |    29 +-
 .../clientpositive/vector_nullsafe_join.q.out   |   326 +-
 .../vector_number_compare_projection.q.out      |    66 +-
 .../results/clientpositive/vector_nvl.q.out     |   181 +-
 .../clientpositive/vector_orderby_5.q.out       |    47 +-
 .../clientpositive/vector_outer_join0.q.out     |    74 +-
 .../clientpositive/vector_outer_join1.q.out     |   126 +-
 .../clientpositive/vector_outer_join2.q.out     |    53 +-
 .../clientpositive/vector_outer_join3.q.out     |   309 +-
 .../clientpositive/vector_outer_join4.q.out     |   235 +-
 .../clientpositive/vector_outer_join5.q.out     |   882 +-
 .../clientpositive/vector_outer_join6.q.out     |   190 +-
 .../vector_partition_diff_num_cols.q.out        |   190 +-
 .../vector_partitioned_date_time.q.out          |   348 +-
 .../results/clientpositive/vector_reduce1.q.out |    32 +-
 .../results/clientpositive/vector_reduce2.q.out |    32 +-
 .../results/clientpositive/vector_reduce3.q.out |    32 +-
 .../vector_reduce_groupby_decimal.q.out         |    47 +-
 .../clientpositive/vector_string_concat.q.out   |    77 +-
 .../clientpositive/vector_string_decimal.q.out  |    14 +-
 .../clientpositive/vector_struct_in.q.out       |   228 +-
 .../vector_tablesample_rows.q.out               |    88 +-
 .../results/clientpositive/vector_udf2.q.out    |    30 +-
 .../results/clientpositive/vector_udf3.q.out    |    27 +-
 .../clientpositive/vector_varchar_4.q.out       |    27 +-
 .../vector_varchar_mapjoin1.q.out               |    60 +-
 .../clientpositive/vector_varchar_simple.q.out  |    75 +-
 .../clientpositive/vector_when_case_null.q.out  |    40 +-
 .../clientpositive/vectorization_0.q.out        |   398 +-
 .../clientpositive/vectorization_13.q.out       |    94 +-
 .../clientpositive/vectorization_14.q.out       |    28 +-
 .../clientpositive/vectorization_15.q.out       |    28 +-
 .../clientpositive/vectorization_16.q.out       |    20 +-
 .../clientpositive/vectorization_17.q.out       |    20 +-
 .../clientpositive/vectorization_7.q.out        |    74 +-
 .../clientpositive/vectorization_8.q.out        |    74 +-
 .../clientpositive/vectorization_9.q.out        |    20 +-
 .../vectorization_decimal_date.q.out            |    33 +-
 .../clientpositive/vectorization_div0.q.out     |   104 +-
 .../clientpositive/vectorization_limit.q.out    |   236 +-
 .../vectorization_offset_limit.q.out            |    52 +-
 .../vectorization_part_project.q.out            |    20 +-
 .../clientpositive/vectorization_pushdown.q.out |    20 +-
 .../vectorization_short_regress.q.out           |   800 +-
 .../vectorized_bucketmapjoin1.q.out             |    99 +-
 .../clientpositive/vectorized_case.q.out        |    62 +-
 .../clientpositive/vectorized_casts.q.out       |    16 +-
 .../clientpositive/vectorized_context.q.out     |    16 +-
 .../clientpositive/vectorized_date_funcs.q.out  |   157 +-
 .../vectorized_distinct_gby.q.out               |    40 +-
 .../clientpositive/vectorized_mapjoin.q.out     |    48 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |    47 +-
 .../clientpositive/vectorized_math_funcs.q.out  |    31 +-
 .../vectorized_nested_mapjoin.q.out             |    20 +-
 .../clientpositive/vectorized_parquet.q.out     |    28 +-
 .../vectorized_parquet_types.q.out              |    48 +-
 .../results/clientpositive/vectorized_ptf.q.out |   728 +-
 .../clientpositive/vectorized_shufflejoin.q.out |    31 +-
 .../vectorized_string_funcs.q.out               |    16 +-
 .../clientpositive/vectorized_timestamp.q.out   |    80 +-
 .../vectorized_timestamp_funcs.q.out            |   222 +-
 .../vectorized_timestamp_ints_casts.q.out       |    62 +-
 .../auth/TestLdapAtnProviderWithMiniDS.java     |  1093 +-
 .../auth/ldap/LdapAuthenticationTestCase.java   |   142 +
 .../org/apache/hive/service/auth/ldap/User.java |    99 +
 840 files changed, 19627 insertions(+), 62173 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3f34134a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index d7d6e38,07ed4fd..f6331f2
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@@ -487,23 -474,4 +487,7 @@@ public class FileSinkDesc extends Abstr
      this.statsTmpDir = statsCollectionTempDir;
    }
  
 +  public void setMmWriteId(Long mmWriteId) {
 +    this.mmWriteId = mmWriteId;
 +  }
- 
-   public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization {
- 
-     public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) {
-       // Native vectorization not supported.
-       super(vectorDesc, false);
-     }
-   }
- 
-   @Explain(vectorization = Vectorization.OPERATOR, displayName = "File Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-   public FileSinkOperatorExplainVectorization getFileSinkVectorization() {
-     if (vectorDesc == null) {
-       return null;
-     }
-     return new FileSinkOperatorExplainVectorization(vectorDesc);
-   }
  }
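
For context on the hunk above: the revert strips the per-operator EXPLAIN hook that HIVE-11394 had added to each operator descriptor. The pattern, as visible in the removed lines, was an inner subclass of OperatorExplainVectorization plus an @Explain-annotated getter that returned null when no VectorDesc was attached (which suppressed the "File Sink Vectorization" block in the plan output). Below is a minimal, self-contained sketch of that pattern; the stand-in annotation, base class, and field details are assumptions made so the sketch compiles on its own, not Hive's actual source.

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

// Stand-in for Hive's @Explain annotation; only the attribute used here.
@Retention(RetentionPolicy.RUNTIME)
@interface Explain {
  String displayName();
}

// Stand-in for the removed base class: carries the per-operator VectorDesc
// plus a "native" flag that EXPLAIN rendered as "native: true/false".
class OperatorExplainVectorization {
  protected final Object vectorDesc;
  protected final boolean isNative;

  OperatorExplainVectorization(Object vectorDesc, boolean isNative) {
    this.vectorDesc = vectorDesc;
    this.isNative = isNative;
  }
}

class FileSinkDescSketch {
  private Object vectorDesc; // set by the Vectorizer only when the operator is vectorized

  // Per-operator subclass; the removed code passed native=false because the
  // file sink had no native vectorized implementation.
  static class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization {
    FileSinkOperatorExplainVectorization(Object vectorDesc) {
      super(vectorDesc, false); // Native vectorization not supported.
    }
  }

  // EXPLAIN walked @Explain-annotated getters; a null return meant the
  // vectorization block was simply omitted for this operator.
  @Explain(displayName = "File Sink Vectorization")
  public FileSinkOperatorExplainVectorization getFileSinkVectorization() {
    if (vectorDesc == null) {
      return null;
    }
    return new FileSinkOperatorExplainVectorization(vectorDesc);
  }
}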

http://git-wip-us.apache.org/repos/asf/hive/blob/3f34134a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------


[24/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
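
The .q.out hunks below drop the nativeConditionsMet / nativeConditionsNotMet lines that the reverted VectorizationCondition helper (76 lines per the diffstat above) produced. The following is a hypothetical reconstruction of how such a helper could render those lines, inferred purely from the "<condition> IS true/false" output format visible in the hunks; the real class's fields and method names may have differed.

import java.util.ArrayList;
import java.util.List;

class VectorizationCondition {
  private final boolean flag;
  private final String conditionName;

  VectorizationCondition(boolean flag, String conditionName) {
    this.flag = flag;
    this.conditionName = conditionName;
  }

  boolean getFlag() {
    return flag;
  }

  // Hypothetical renderer: selects the met (or unmet) conditions and formats
  // each as "<name> IS true" / "<name> IS false", as seen in the .q.out files.
  static List<String> render(List<VectorizationCondition> conditions, boolean met) {
    List<String> result = new ArrayList<>();
    for (VectorizationCondition c : conditions) {
      if (c.getFlag() == met) {
        result.add(c.conditionName + " IS " + c.getFlag());
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<VectorizationCondition> conds = List.of(
        new VectorizationCondition(true, "No TopN"),
        new VectorizationCondition(false, "Uniform Hash"));
    System.out.println("nativeConditionsMet: " + String.join(", ", render(conds, true)));
    System.out.println("nativeConditionsNotMet: " + String.join(", ", render(conds, false)));
  }
}
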
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join30.q.out b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
index 9e591b8..bb6916b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
@@ -14,7 +14,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orcsrc
 POSTHOOK: Lineage: orcsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: orcsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -22,7 +22,7 @@ JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -30,10 +30,6 @@ JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -53,93 +49,40 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: string)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF sum parameter expression for GROUPBY operator: UDF GenericUDFHash(Column[_col2], Column[_col3]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
@@ -166,30 +109,14 @@ STAGE PLANS:
                       value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -197,30 +124,15 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
 
@@ -249,7 +161,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 103231310608
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -257,7 +169,7 @@ LEFT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -265,10 +177,6 @@ LEFT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -288,79 +196,34 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF sum parameter expression for GROUPBY operator: UDF GenericUDFHash(Column[_col2], Column[_col3]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
@@ -387,30 +250,14 @@ STAGE PLANS:
                       value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -418,30 +265,15 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
 
@@ -470,7 +302,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 103231310608
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN
@@ -478,7 +310,7 @@ RIGHT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN
@@ -486,10 +318,6 @@ RIGHT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -509,106 +337,46 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF sum parameter expression for GROUPBY operator: UDF GenericUDFHash(Column[_col2], Column[_col3]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
@@ -635,30 +403,14 @@ STAGE PLANS:
                       value expressions: _col0 (type: bigint)
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -690,7 +442,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 103231310608
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -701,7 +453,7 @@ JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -712,10 +464,6 @@ JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -736,136 +484,59 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: string)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: string)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF sum parameter expression for GROUPBY operator: UDF GenericUDFHash(Column[_col2], Column[_col3]) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
@@ -895,30 +566,14 @@ STAGE PLANS:
                       value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -926,58 +581,28 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 7 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1011,7 +636,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 348019368476
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -1022,7 +647,7 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -1033,10 +658,6 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1058,134 +679,59 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1211,30 +757,14 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1242,58 +772,28 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1327,7 +827,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 348019368476
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -1338,7 +838,7 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -1349,10 +849,6 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1374,134 +870,59 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1527,30 +948,14 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1558,58 +963,28 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1643,7 +1018,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 348019368476
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -1654,7 +1029,7 @@ RIGHT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -1665,10 +1040,6 @@ RIGHT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1690,134 +1061,59 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1843,30 +1139,14 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1874,58 +1154,28 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1]
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1959,7 +1209,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orcsrc
 #### A masked pattern was here ####
 348019368476
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN
@@ -1970,7 +1220,7 @@ RIGHT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN
@@ -1981,10 +1231,6 @@ RIGHT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2006,134 +1252,59 @@ STAGE PLANS:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: orcsrc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-           

<TRUNCATED>

[44/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 78b2e8b..ebe613e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.parse.TableSample;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde.serdeConstants;
 
 
@@ -397,29 +396,4 @@ public class TableScanDesc extends AbstractOperatorDesc {
     return opProps;
   }
 
-  public class TableScanOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final TableScanDesc tableScanDesc;
-    private final VectorTableScanDesc vectorTableScanDesc;
-
-    public TableScanOperatorExplainVectorization(TableScanDesc tableScanDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-      this.tableScanDesc = tableScanDesc;
-      vectorTableScanDesc = (VectorTableScanDesc) vectorDesc;
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "projectedOutputColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getProjectedOutputColumns() {
-      return Arrays.toString(vectorTableScanDesc.getProjectedOutputColumns());
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "TableScan Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public TableScanOperatorExplainVectorization getTableScanVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new TableScanOperatorExplainVectorization(this, vectorDesc);
-  }
 }
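
The block removed above is the plumbing behind the "TableScan Vectorization" sections in the q.out diffs earlier in this message: a per-operator wrapper whose @Explain-annotated getters EXPLAIN renders as indented key/value lines. A minimal sketch of the reverted pattern, assuming a hypothetical FooDesc/VectorFooDesc pair (the Hive types used are the ones visible in the deleted code above):

    // Sketch only -- "Foo" names are hypothetical. EXPLAIN VECTORIZATION walks the
    // @Explain-annotated getters and prints, e.g.:
    //   Foo Vectorization:
    //       projectedOutputColumns: [0, 1]
    public class FooOperatorExplainVectorization extends OperatorExplainVectorization {
      private final VectorFooDesc vectorFooDesc;

      public FooOperatorExplainVectorization(VectorDesc vectorDesc) {
        super(vectorDesc, true);  // true = native vectorization supported
        vectorFooDesc = (VectorFooDesc) vectorDesc;
      }

      @Explain(vectorization = Vectorization.EXPRESSION, displayName = "projectedOutputColumns",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public String getProjectedOutputColumns() {
        return Arrays.toString(vectorFooDesc.getProjectedOutputColumns());
      }
    }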

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
index a037ea3..7a70e6b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hive.ql.exec.tez.DagUtils;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
+
 
 /**
  * TezWork. This class encapsulates all the work objects that can be executed
@@ -49,8 +49,7 @@ import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
  *
  */
 @SuppressWarnings("serial")
-@Explain(displayName = "Tez", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-    vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Tez", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class TezWork extends AbstractOperatorDesc {
 
   public enum VertexType {
@@ -108,8 +107,7 @@ public class TezWork extends AbstractOperatorDesc {
   /**
    * getWorkMap returns a map of "vertex name" to BaseWork
    */
-  @Explain(displayName = "Vertices", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Vertices", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public Map<String, BaseWork> getWorkMap() {
     Map<String, BaseWork> result = new LinkedHashMap<String, BaseWork>();
     for (BaseWork w: getAllWork()) {
@@ -308,8 +306,7 @@ public class TezWork extends AbstractOperatorDesc {
     }
   }
 
-  @Explain(displayName = "Edges", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Edges", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public Map<String, List<Dependency>> getDependencyMap() {
     Map<String, List<Dependency>> result = new LinkedHashMap<String, List<Dependency>>();
     for (Map.Entry<BaseWork, List<BaseWork>> entry: invertedWorkGraph.entrySet()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java
deleted file mode 100644
index 2e11321..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorAppMasterEventDesc.
- *
- * Extra parameters beyond AppMasterEventDesc just for the VectorAppMasterEventOperator.
- *
- * We don't extend AppMasterEventDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorAppMasterEventDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  public VectorAppMasterEventDesc() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorDesc.java
index 078408c..3a2efdb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorDesc.java
@@ -22,9 +22,4 @@ import java.io.Serializable;
 
 public interface VectorDesc extends Serializable, Cloneable {
   public Object clone() throws CloneNotSupportedException;
-
-  public void setVectorOp(Class<?> vectorOpClass);
-
-  public Class<?> getVectorOpClass();
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java
deleted file mode 100644
index 325ac91..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorFileSinkDesc.
- *
- * Extra parameters beyond FileSinkDesc just for the VectorFileSinkOperator.
- *
- * We don't extend FileSinkDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorFileSinkDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  public VectorFileSinkDesc() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java
deleted file mode 100644
index 6feed84..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-
-/**
- * VectorFilterDesc.
- *
- * Extra parameters beyond FilterDesc just for the VectorFilterOperator.
- *
- * We don't extend FilterDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorFilterDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  private VectorExpression predicateExpression;
-
-  public VectorFilterDesc() {
-  }
-
-  public void setPredicateExpression(VectorExpression predicateExpression) {
-    this.predicateExpression = predicateExpression;
-  }
-
-  public VectorExpression getPredicateExpression() {
-    return predicateExpression;
-  }
-
-}
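
With VectorFilterDesc gone, the compiled predicate no longer rides on the desc: post-revert, VectorFilterOperator compiles the FilterDesc predicate itself from the VectorizationContext passed to its constructor. The TestVectorFilterOperator hunk later in this message shows both sides; repeated here as a sketch (variable names as in that test):

    // Pre-revert (HIVE-11394): build the plan operator, then let the Vectorizer
    // compile the predicate into a VectorFilterDesc and swap in the vector operator.
    Operator<? extends OperatorDesc> filterOp =
        OperatorFactory.get(new CompilationOpContext(), fdesc);
    VectorFilterOperator pre =
        (VectorFilterOperator) Vectorizer.vectorizeFilterOperator(filterOp, vc);

    // Post-revert: construct the vector operator directly; it compiles the
    // predicate from the VectorizationContext itself.
    VectorFilterOperator post = new VectorFilterOperator(new CompilationOpContext(), vc, fdesc);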

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
index f8554e2..08f8ebf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
-
 /**
  * VectorGroupByDesc.
  *
@@ -62,10 +59,6 @@ public class VectorGroupByDesc extends AbstractVectorDesc  {
 
   private boolean isVectorOutput;
 
-  private VectorExpression[] keyExpressions;
-  private VectorAggregateExpression[] aggregators;
-  private int[] projectedOutputColumns;
-
   public VectorGroupByDesc() {
     this.processingMode = ProcessingMode.NONE;
     this.isVectorOutput = false;
@@ -86,30 +79,6 @@ public class VectorGroupByDesc extends AbstractVectorDesc  {
     this.isVectorOutput = isVectorOutput;
   }
 
-  public void setKeyExpressions(VectorExpression[] keyExpressions) {
-    this.keyExpressions = keyExpressions;
-  }
-
-  public VectorExpression[] getKeyExpressions() {
-    return keyExpressions;
-  }
-
-  public void setAggregators(VectorAggregateExpression[] aggregators) {
-    this.aggregators = aggregators;
-  }
-
-  public VectorAggregateExpression[] getAggregators() {
-    return aggregators;
-  }
-
-  public void setProjectedOutputColumns(int[] projectedOutputColumns) {
-    this.projectedOutputColumns = projectedOutputColumns;
-  }
-
-  public int[] getProjectedOutputColumns() {
-    return projectedOutputColumns;
-  }
-
   /**
    * Which ProcessingMode for VectorGroupByOperator?
    *
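
The fields stripped here show the composition pattern every Vector*Desc in this patch follows: the logical desc (GroupByDesc) carries its vector counterpart via setVectorDesc()/getVectorDesc() rather than by inheritance, and the vector desc keeps only vector-side parameters such as the ProcessingMode. A sketch of the pre-revert usage, taken from the removed TestVectorGroupByOperator lines later in this message:

    // Attach the vector-side desc to the logical desc, then configure it.
    GroupByDesc desc = new GroupByDesc();
    desc.setVectorDesc(new VectorGroupByDesc());
    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.HASH);
    // GLOBAL is used instead when the reduce side has no grouping key
    // ("Use GLOBAL when no key for Reduce" in the test diff below).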

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java
deleted file mode 100644
index c9bc45a..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorLimitDesc.
- *
- * Extra parameters beyond LimitDesc just for the VectorLimitOperator.
- *
- * We don't extend LimitDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorLimitDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  public VectorLimitDesc() {
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java
index 1252140..8ea230f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java
@@ -18,13 +18,9 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import java.util.List;
-
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
-import com.google.common.base.Preconditions;
-
 /**
 * VectorMapJoinDesc.
  *
@@ -83,38 +79,23 @@ public class VectorMapJoinDesc extends AbstractVectorDesc  {
     }
   }
 
-  public static enum OperatorVariation {
-    NONE,
-    INNER_BIG_ONLY,
-    INNER,
-    LEFT_SEMI,
-    OUTER
-  }
-
   private HashTableImplementationType hashTableImplementationType;
   private HashTableKind hashTableKind;
   private HashTableKeyType hashTableKeyType;
-  private OperatorVariation operatorVariation;
   private boolean minMaxEnabled;
 
-  private VectorMapJoinInfo vectorMapJoinInfo;
-
   public VectorMapJoinDesc() {
     hashTableImplementationType = HashTableImplementationType.NONE;
     hashTableKind = HashTableKind.NONE;
     hashTableKeyType = HashTableKeyType.NONE;
-    operatorVariation = OperatorVariation.NONE;
     minMaxEnabled = false;
-    vectorMapJoinInfo = null;
   }
 
   public VectorMapJoinDesc(VectorMapJoinDesc clone) {
     this.hashTableImplementationType = clone.hashTableImplementationType;
     this.hashTableKind = clone.hashTableKind;
     this.hashTableKeyType = clone.hashTableKeyType;
-    this.operatorVariation = clone.operatorVariation;
     this.minMaxEnabled = clone.minMaxEnabled;
-    this.vectorMapJoinInfo = clone.vectorMapJoinInfo;
   }
 
   public HashTableImplementationType hashTableImplementationType() {
@@ -141,14 +122,6 @@ public class VectorMapJoinDesc extends AbstractVectorDesc  {
     this.hashTableKeyType = hashTableKeyType;
   }
 
-  public OperatorVariation operatorVariation() {
-    return operatorVariation;
-  }
-
-  public void setOperatorVariation(OperatorVariation operatorVariation) {
-    this.operatorVariation = operatorVariation;
-  }
-
   public boolean minMaxEnabled() {
     return minMaxEnabled;
   }
@@ -156,87 +129,4 @@ public class VectorMapJoinDesc extends AbstractVectorDesc  {
   public void setMinMaxEnabled(boolean minMaxEnabled) {
     this.minMaxEnabled = minMaxEnabled;
   }
-
-  public void setVectorMapJoinInfo(VectorMapJoinInfo vectorMapJoinInfo) {
-    Preconditions.checkState(vectorMapJoinInfo != null);
-    this.vectorMapJoinInfo = vectorMapJoinInfo;
-  }
-
-  public VectorMapJoinInfo getVectorMapJoinInfo() {
-    return vectorMapJoinInfo;
-  }
-
-  private boolean isVectorizationMapJoinNativeEnabled;
-  private String engine;
-  private boolean oneMapJoinCondition;
-  private boolean hasNullSafes;
-  private boolean isFastHashTableEnabled;
-  private boolean isHybridHashJoin;
-  private boolean supportsKeyTypes;
-  private List<String> notSupportedKeyTypes;
-  private boolean isEmptyKey;
-  private boolean smallTableExprVectorizes;
-
-  public void setIsVectorizationMapJoinNativeEnabled(boolean isVectorizationMapJoinNativeEnabled) {
-    this.isVectorizationMapJoinNativeEnabled = isVectorizationMapJoinNativeEnabled;
-  }
-  public boolean getIsVectorizationMapJoinNativeEnabled() {
-    return isVectorizationMapJoinNativeEnabled;
-  }
-  public void setEngine(String engine) {
-    this.engine = engine;
-  }
-  public String getEngine() {
-    return engine;
-  }
-  public void setOneMapJoinCondition(boolean oneMapJoinCondition) {
-    this.oneMapJoinCondition = oneMapJoinCondition;
-  }
-  public boolean getOneMapJoinCondition() {
-    return oneMapJoinCondition;
-  }
-  public void setHasNullSafes(boolean hasNullSafes) {
-    this.hasNullSafes = hasNullSafes;
-  }
-  public boolean getHasNullSafes() {
-    return hasNullSafes;
-  }
-  public void setSupportsKeyTypes(boolean supportsKeyTypes) {
-    this.supportsKeyTypes = supportsKeyTypes;
-  }
-  public boolean getSupportsKeyTypes() {
-    return supportsKeyTypes;
-  }
-  public void setNotSupportedKeyTypes(List<String> notSupportedKeyTypes) {
-    this.notSupportedKeyTypes = notSupportedKeyTypes;
-  }
-  public List<String> getNotSupportedKeyTypes() {
-    return notSupportedKeyTypes;
-  }
-  public void setIsEmptyKey(boolean isEmptyKey) {
-    this.isEmptyKey = isEmptyKey;
-  }
-  public boolean getIsEmptyKey() {
-    return isEmptyKey;
-  }
-  public void setSmallTableExprVectorizes(boolean smallTableExprVectorizes) {
-    this.smallTableExprVectorizes = smallTableExprVectorizes;
-  }
-  public boolean getSmallTableExprVectorizes() {
-    return smallTableExprVectorizes;
-  }
-
-  public void setIsFastHashTableEnabled(boolean isFastHashTableEnabled) {
-    this.isFastHashTableEnabled = isFastHashTableEnabled;
-  }
-  public boolean getIsFastHashTableEnabled() {
-    return isFastHashTableEnabled;
-  }
-  public void setIsHybridHashJoin(boolean isHybridHashJoin) {
-    this.isHybridHashJoin = isHybridHashJoin;
-  }
-  public boolean getIsHybridHashJoin() {
-    return isHybridHashJoin;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java
deleted file mode 100644
index 2cf2e72..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping;
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-
-/**
- * VectorMapJoinInfo.
- *
- * A convenience data structure that holds the information needed to vectorize a map join.
- *
- * It is created by the Vectorizer while it is determining whether it can specialize, so the
- * information doesn't have to be recreated again and again by the VectorMapJoinOperator
- * constructors and later during execution.
- */
-public class VectorMapJoinInfo {
-
-  private static long serialVersionUID = 1L;
-
-  private int[] bigTableKeyColumnMap;
-  private String[] bigTableKeyColumnNames;
-  private TypeInfo[] bigTableKeyTypeInfos;
-  private VectorExpression[] bigTableKeyExpressions;
-
-  private int[] bigTableValueColumnMap;
-  private String[] bigTableValueColumnNames;
-  private TypeInfo[] bigTableValueTypeInfos;
-  private VectorExpression[] bigTableValueExpressions;
-
-  private VectorColumnOutputMapping bigTableRetainedMapping;
-  private VectorColumnOutputMapping bigTableOuterKeyMapping;
-  private VectorColumnSourceMapping smallTableMapping;
-
-  private VectorColumnSourceMapping projectionMapping;
-
-  public VectorMapJoinInfo() {
-    bigTableKeyColumnMap = null;
-    bigTableKeyColumnNames = null;
-    bigTableKeyTypeInfos = null;
-    bigTableKeyExpressions = null;
-
-    bigTableValueColumnMap = null;
-    bigTableValueColumnNames = null;
-    bigTableValueTypeInfos = null;
-    bigTableValueExpressions = null;
-
-    bigTableRetainedMapping = null;
-    bigTableOuterKeyMapping = null;
-    smallTableMapping = null;
-
-    projectionMapping = null;
-  }
-
-  public int[] getBigTableKeyColumnMap() {
-    return bigTableKeyColumnMap;
-  }
-
-  public void setBigTableKeyColumnMap(int[] bigTableKeyColumnMap) {
-    this.bigTableKeyColumnMap = bigTableKeyColumnMap;
-  }
-
-  public String[] getBigTableKeyColumnNames() {
-    return bigTableKeyColumnNames;
-  }
-
-  public void setBigTableKeyColumnNames(String[] bigTableKeyColumnNames) {
-    this.bigTableKeyColumnNames = bigTableKeyColumnNames;
-  }
-
-  public TypeInfo[] getBigTableKeyTypeInfos() {
-    return bigTableKeyTypeInfos;
-  }
-
-  public void setBigTableKeyTypeInfos(TypeInfo[] bigTableKeyTypeInfos) {
-    this.bigTableKeyTypeInfos = bigTableKeyTypeInfos;
-  }
-
-  public VectorExpression[] getBigTableKeyExpressions() {
-    return bigTableKeyExpressions;
-  }
-
-  public void setBigTableKeyExpressions(VectorExpression[] bigTableKeyExpressions) {
-    this.bigTableKeyExpressions = bigTableKeyExpressions;
-  }
-
-
-  public int[] getBigTableValueColumnMap() {
-    return bigTableValueColumnMap;
-  }
-
-  public void setBigTableValueColumnMap(int[] bigTableValueColumnMap) {
-    this.bigTableValueColumnMap = bigTableValueColumnMap;
-  }
-
-  public String[] getBigTableValueColumnNames() {
-    return bigTableValueColumnNames;
-  }
-
-  public void setBigTableValueColumnNames(String[] bigTableValueColumnNames) {
-    this.bigTableValueColumnNames = bigTableValueColumnNames;
-  }
-
-  public TypeInfo[] getBigTableValueTypeInfos() {
-    return bigTableValueTypeInfos;
-  }
-
-  public void setBigTableValueTypeInfos(TypeInfo[] bigTableValueTypeInfos) {
-    this.bigTableValueTypeInfos = bigTableValueTypeInfos;
-  }
-
-  public VectorExpression[] getBigTableValueExpressions() {
-    return bigTableValueExpressions;
-  }
-
-  public void setBigTableValueExpressions(VectorExpression[] bigTableValueExpressions) {
-    this.bigTableValueExpressions = bigTableValueExpressions;
-  }
-
-  public void setBigTableRetainedMapping(VectorColumnOutputMapping bigTableRetainedMapping) {
-    this.bigTableRetainedMapping = bigTableRetainedMapping;
-  }
-
-  public VectorColumnOutputMapping getBigTableRetainedMapping() {
-    return bigTableRetainedMapping;
-  }
-
-  public void setBigTableOuterKeyMapping(VectorColumnOutputMapping bigTableOuterKeyMapping) {
-    this.bigTableOuterKeyMapping = bigTableOuterKeyMapping;
-  }
-
-  public VectorColumnOutputMapping getBigTableOuterKeyMapping() {
-    return bigTableOuterKeyMapping;
-  }
-
-  public void setSmallTableMapping(VectorColumnSourceMapping smallTableMapping) {
-    this.smallTableMapping = smallTableMapping;
-  }
-
-  public VectorColumnSourceMapping getSmallTableMapping() {
-    return smallTableMapping;
-  }
-
-  public void setProjectionMapping(VectorColumnSourceMapping projectionMapping) {
-    this.projectionMapping = projectionMapping;
-  }
-
-  public VectorColumnSourceMapping getProjectionMapping() {
-    return projectionMapping;
-  }
-}
\ No newline at end of file
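
VectorMapJoinInfo, deleted above, is pure carrier state: the Vectorizer fills it in once while deciding whether a map join can be specialized, and the specialized operator reads it back instead of re-deriving the column layouts. A hedged sketch of that round trip, using only the accessors shown above (values and the vectorMapJoinDesc variable are illustrative):

    // Vectorizer side: record the big-table key layout while checking
    // whether the map join can be specialized.
    VectorMapJoinInfo info = new VectorMapJoinInfo();
    info.setBigTableKeyColumnMap(new int[] { 0 });
    info.setBigTableKeyColumnNames(new String[] { "key" });
    vectorMapJoinDesc.setVectorMapJoinInfo(info);

    // Operator side: read the same layout back instead of recomputing it.
    int[] bigTableKeyCols =
        vectorMapJoinDesc.getVectorMapJoinInfo().getBigTableKeyColumnMap();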

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
index 288a440..c56bff6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
@@ -61,72 +61,4 @@ public class VectorReduceSinkDesc extends AbstractVectorDesc  {
   public VectorReduceSinkInfo getVectorReduceSinkInfo() {
     return vectorReduceSinkInfo;
   }
-
-  private boolean isVectorizationReduceSinkNativeEnabled;
-  private String engine;
-  private boolean acidChange;
-  private boolean hasBuckets;
-  private boolean hasTopN;
-  private boolean useUniformHash;
-  private boolean hasDistinctColumns;
-  private boolean isKeyBinarySortable;
-  private boolean isValueLazyBinary;
-
-  /*
-   * The following conditions are for native Vector ReduceSink.
-   */
-  public void setIsVectorizationReduceSinkNativeEnabled(boolean isVectorizationReduceSinkNativeEnabled) {
-    this.isVectorizationReduceSinkNativeEnabled = isVectorizationReduceSinkNativeEnabled;
-  }
-  public boolean getIsVectorizationReduceSinkNativeEnabled() {
-    return isVectorizationReduceSinkNativeEnabled;
-  }
-  public void setEngine(String engine) {
-    this.engine = engine;
-  }
-  public String getEngine() {
-    return engine;
-  }
-  public void setAcidChange(boolean acidChange) {
-    this.acidChange = acidChange;
-  }
-  public boolean getAcidChange() {
-    return acidChange;
-  }
-  public void setHasBuckets(boolean hasBuckets) {
-    this.hasBuckets = hasBuckets;
-  }  
-  public boolean getHasBuckets() {
-    return hasBuckets;
-  }
-  public void setHasTopN(boolean hasTopN) {
-    this.hasTopN = hasTopN;
-  }
-  public boolean getHasTopN() {
-    return hasTopN;
-  }
-  public void setUseUniformHash(boolean useUniformHash) {
-    this.useUniformHash = useUniformHash;
-  }
-  public boolean getUseUniformHash() {
-    return useUniformHash;
-  }
-  public void setHasDistinctColumns(boolean hasDistinctColumns) {
-    this.hasDistinctColumns = hasDistinctColumns;
-  }
-  public boolean getHasDistinctColumns() {
-    return hasDistinctColumns;
-  }
-  public void setIsKeyBinarySortable(boolean isKeyBinarySortable) {
-    this.isKeyBinarySortable = isKeyBinarySortable;
-  }
-  public boolean getIsKeyBinarySortable() {
-    return isKeyBinarySortable;
-  }
-  public void setIsValueLazyBinary(boolean isValueLazyBinary) {
-    this.isValueLazyBinary = isValueLazyBinary;
-  }
-  public boolean getIsValueLazyBinary() {
-    return isValueLazyBinary;
-  }
 }
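
The boolean fields dropped here are the raw inputs behind the "nativeConditionsMet" / "nativeConditionsNotMet" lists in the q.out diffs at the top of this message (No buckets, No TopN, Uniform Hash, BinarySortableSerDe for keys, LazyBinarySerDe for values, and so on). A sketch of how such flags map to those strings via the VectorizationCondition helper whose deleted source appears later in this message (the exact wiring in the reverted code may differ; the flag-to-name pairs here follow the q.out output):

    // Illustrative fragment: each desc flag becomes one VectorizationCondition;
    // the met/not-met partition becomes the two lists printed by EXPLAIN.
    VectorizationCondition[] conditions = new VectorizationCondition[] {
        new VectorizationCondition(desc.getIsVectorizationReduceSinkNativeEnabled(),
            "hive.vectorized.execution.reducesink.new.enabled"),
        new VectorizationCondition(!desc.getHasTopN(), "No TopN"),
        new VectorizationCondition(desc.getUseUniformHash(), "Uniform Hash")
    };
    List<String> met = VectorizationCondition.getConditionsMet(conditions);
    List<String> notMet = VectorizationCondition.getConditionsNotMet(conditions);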

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java
deleted file mode 100644
index ab578cd..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorSMBJoinDesc.
- *
- * Extra parameters beyond SMBMapJoinDesc just for the VectorSMBMapJoinOperator.
- *
- * We don't extend SMBMapJoinDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorSMBJoinDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  public VectorSMBJoinDesc() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java
deleted file mode 100644
index c2c9450..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-
-/**
- * VectorSelectDesc.
- *
- * Extra parameters beyond SelectDesc just for the VectorSelectOperator.
- *
- * We don't extend SelectDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorSelectDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  private VectorExpression[] selectExpressions;
-  private int[] projectedOutputColumns;
-
-  public VectorSelectDesc() {
-  }
-
-  public void setSelectExpressions(VectorExpression[] selectExpressions) {
-    this.selectExpressions = selectExpressions;
-  }
-
-  public VectorExpression[] getSelectExpressions() {
-    return selectExpressions;
-  }
-
-  public void setProjectedOutputColumns(int[] projectedOutputColumns) {
-    this.projectedOutputColumns = projectedOutputColumns;
-  }
-
-  public int[] getProjectedOutputColumns() {
-    return projectedOutputColumns;
-  }
-}
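
VectorSelectDesc is where the "Select Vectorization ... projectedOutputColumns" lines in the plan diffs above come from; for example, the reordered select in Reducer 6 prints projectedOutputColumns: [1, 0]. A tiny fragment showing that the printed value is just this array, stringified by the same @Explain getter pattern sketched earlier for TableScanDesc:

    // Fragment (pre-revert classes); java.util.Arrays assumed imported.
    VectorSelectDesc vectorSelectDesc = new VectorSelectDesc();
    vectorSelectDesc.setProjectedOutputColumns(new int[] { 1, 0 });
    System.out.println(Arrays.toString(vectorSelectDesc.getProjectedOutputColumns()));  // [1, 0]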

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java
deleted file mode 100644
index 7fb59db..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorSparkHashTableSinkDesc.
- *
- * Extra parameters beyond HashTableSinkDesc just for the VectorSparkHashTableSinkOperator.
- *
- * We don't extend HashTableSinkDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorSparkHashTableSinkDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  public VectorSparkHashTableSinkDesc() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java
deleted file mode 100644
index 6e5ebe4..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-/**
- * VectorTableScanDesc.
- *
- * Extra parameters beyond TableScanDesc just for the VectorTableScanOperator.
- *
- * We don't extend TableScanDesc because the base OperatorDesc doesn't support
- * clone and adding it is a lot of work for little gain.
- */
-public class VectorTableScanDesc extends AbstractVectorDesc  {
-
-  private static long serialVersionUID = 1L;
-
-  private int[] projectedOutputColumns;
-
-  public VectorTableScanDesc() {
-  }
-
-  public void setProjectedOutputColumns(int[] projectedOutputColumns) {
-    this.projectedOutputColumns = projectedOutputColumns;
-  }
-
-  public int[] getProjectedOutputColumns() {
-    return projectedOutputColumns;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
deleted file mode 100644
index 32b62e8..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-public class VectorizationCondition {
-
-  private final boolean flag;
-  private final String conditionName;
-
-  public VectorizationCondition(boolean flag, String conditionName) {
-    this.flag = flag;
-    this.conditionName = conditionName;
-  }
-
-  public boolean getFlag() {
-    return flag;
-  }
-
-  public String getConditionName() {
-    return conditionName;
-  }
-
-  public static List<String> getConditionsMet(VectorizationCondition[] conditions) {
-    List<String> metList = new ArrayList<String>();
-    for (VectorizationCondition condition : conditions) {
-      if (condition.getFlag()) {
-        metList.add(condition.getConditionName() + " IS true");
-      }
-    }
-    return metList;
-  }
-
-  public static List<String> getConditionsNotMet(VectorizationCondition[] conditions) {
-    List<String> notMetList = new ArrayList<String>();
-    for (VectorizationCondition condition : conditions) {
-      if (!condition.getFlag()) {
-        notMetList.add(condition.getConditionName() + " IS false");
-      }
-    }
-    return notMetList;
-  }
-
-  public static List<String> addBooleans(List<String> conditions, boolean flag) {
-    ArrayList<String> result = new ArrayList<String>(conditions.size());
-    for (String condition : conditions) {
-      result.add(condition + " IS " + flag);
-    }
-    return result;
-  }
-
-  // Helper method.
-  public static List<String> getConditionsSupported(boolean isSupported) {
-    return Arrays.asList("Supported IS " + isSupported);
-  }
-
-}
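
VectorizationCondition is plain Java with no Hive dependencies, so its behavior is easy to verify standalone. A self-contained demo of how the met/not-met lists seen in the q.out output are produced, assuming the deleted class above is on the classpath (package declaration omitted; the flag values are made up, not taken from a real plan):

    public class VectorizationConditionDemo {
      public static void main(String[] args) {
        VectorizationCondition[] conditions = new VectorizationCondition[] {
            new VectorizationCondition(true, "No TopN"),
            new VectorizationCondition(true, "No DISTINCT columns"),
            new VectorizationCondition(false, "Uniform Hash")
        };
        // Prints: [No TopN IS true, No DISTINCT columns IS true]
        System.out.println(VectorizationCondition.getConditionsMet(conditions));
        // Prints: [Uniform Hash IS false]
        // -- cf. "nativeConditionsNotMet: Uniform Hash IS false" in the plan diff above.
        System.out.println(VectorizationCondition.getConditionsNotMet(conditions));
      }
    }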

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
index 22b845d..d3bb84d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
@@ -25,19 +25,13 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.FilterOperator;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.FilterExprAndExpr;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterLongColEqualDoubleScalar;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterLongColGreaterLongColumn;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.VectorFilterDesc;
 import org.junit.Test;
 
 /**
@@ -95,15 +89,10 @@ public class TestVectorFilterOperator {
     ExprNodeColumnDesc col1Expr = new  ExprNodeColumnDesc(Long.class, "col1", "table", false);
     List<String> columns = new ArrayList<String>();
     columns.add("col1");
+    VectorizationContext vc = new VectorizationContext("name", columns);
     FilterDesc fdesc = new FilterDesc();
     fdesc.setPredicate(col1Expr);
-
-    Operator<? extends OperatorDesc> filterOp = 
-        OperatorFactory.get(new CompilationOpContext(), fdesc);
-
-    VectorizationContext vc = new VectorizationContext("name", columns);
-
-    return (VectorFilterOperator) Vectorizer.vectorizeFilterOperator(filterOp, vc);
+    return new VectorFilterOperator(new CompilationOpContext(), vc, fdesc);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
index 086c0b7..f5b5d9d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
@@ -39,20 +39,16 @@ import java.util.Set;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.vector.util.FakeCaptureOutputOperator;
 import org.apache.hadoop.hive.ql.exec.vector.util.FakeVectorRowBatchFromConcat;
 import org.apache.hadoop.hive.ql.exec.vector.util.FakeVectorRowBatchFromLongIterables;
 import org.apache.hadoop.hive.ql.exec.vector.util.FakeVectorRowBatchFromObjectIterables;
 import org.apache.hadoop.hive.ql.exec.vector.util.FakeVectorRowBatchFromRepeats;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc;
 import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
@@ -132,11 +128,9 @@ public class TestVectorGroupByOperator {
     outputColumnNames.add("_col0");
 
     GroupByDesc desc = new GroupByDesc();
-    desc.setVectorDesc(new VectorGroupByDesc());
-
     desc.setOutputColumnNames(outputColumnNames);
     desc.setAggregators(aggs);
-    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.GLOBAL);
+    desc.getVectorDesc().setProcessingMode(ProcessingMode.GLOBAL);
 
     return desc;
   }
@@ -152,8 +146,6 @@ public class TestVectorGroupByOperator {
     outputColumnNames.add("_col0");
 
     GroupByDesc desc = new GroupByDesc();
-    desc.setVectorDesc(new VectorGroupByDesc());
-
     desc.setOutputColumnNames(outputColumnNames);
     desc.setAggregators(aggs);
 
@@ -170,7 +162,7 @@ public class TestVectorGroupByOperator {
       TypeInfo keyTypeInfo) {
 
     GroupByDesc desc = buildGroupByDescType(ctx, aggregate, GenericUDAFEvaluator.Mode.PARTIAL1, column, dataTypeInfo);
-    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.HASH);
+    desc.getVectorDesc().setProcessingMode(ProcessingMode.HASH);
 
     ExprNodeDesc keyExp = buildColumnDesc(ctx, key, keyTypeInfo);
     ArrayList<ExprNodeDesc> keys = new ArrayList<ExprNodeDesc>();
@@ -204,11 +196,7 @@ public class TestVectorGroupByOperator {
     desc.setMemoryThreshold(treshold);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -1747,19 +1735,13 @@ public class TestVectorGroupByOperator {
     }
 
     GroupByDesc desc = new GroupByDesc();
-    desc.setVectorDesc(new VectorGroupByDesc());
-
     desc.setOutputColumnNames(outputColumnNames);
     desc.setAggregators(aggs);
     desc.setKeys(keysDesc);
-    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.HASH);
+    desc.getVectorDesc().setProcessingMode(ProcessingMode.HASH);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -1864,11 +1846,9 @@ public class TestVectorGroupByOperator {
     outputColumnNames.add("_col1");
 
     GroupByDesc desc = new GroupByDesc();
-    desc.setVectorDesc(new VectorGroupByDesc());
-
     desc.setOutputColumnNames(outputColumnNames);
     desc.setAggregators(aggs);
-    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.HASH);
+    desc.getVectorDesc().setProcessingMode(ProcessingMode.HASH);
 
     ExprNodeDesc keyExp = buildColumnDesc(ctx, "Key",
         TypeInfoFactory.getPrimitiveTypeInfo(data.getTypes()[0]));
@@ -1877,11 +1857,7 @@ public class TestVectorGroupByOperator {
     desc.setKeys(keysDesc);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2276,14 +2252,10 @@ public class TestVectorGroupByOperator {
     VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
 
     GroupByDesc desc = buildGroupByDescCountStar (ctx);
-    ((VectorGroupByDesc) desc.getVectorDesc()).setProcessingMode(ProcessingMode.HASH);
+    desc.getVectorDesc().setProcessingMode(ProcessingMode.HASH);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2311,14 +2283,10 @@ public class TestVectorGroupByOperator {
     VectorizationContext ctx = new VectorizationContext("name", mapColumnNames);
 
     GroupByDesc desc = buildGroupByDescType(ctx, "count", GenericUDAFEvaluator.Mode.FINAL, "A", TypeInfoFactory.longTypeInfo);
-    VectorGroupByDesc vectorDesc = (VectorGroupByDesc) desc.getVectorDesc();
+    VectorGroupByDesc vectorDesc = desc.getVectorDesc();
     vectorDesc.setProcessingMode(ProcessingMode.GLOBAL);  // Use GLOBAL when no key for Reduce.
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2350,11 +2318,7 @@ public class TestVectorGroupByOperator {
         TypeInfoFactory.stringTypeInfo);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2386,11 +2350,7 @@ public class TestVectorGroupByOperator {
         buildGroupByDescType(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "A", TypeInfoFactory.getDecimalTypeInfo(30, 4));
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2423,11 +2383,7 @@ public class TestVectorGroupByOperator {
         TypeInfoFactory.doubleTypeInfo);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2458,11 +2414,7 @@ public class TestVectorGroupByOperator {
     GroupByDesc desc = buildGroupByDescType(ctx, aggregateName, GenericUDAFEvaluator.Mode.PARTIAL1, "A", TypeInfoFactory.longTypeInfo);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(null, null);
@@ -2497,11 +2449,7 @@ public class TestVectorGroupByOperator {
         TypeInfoFactory.longTypeInfo, "Key", TypeInfoFactory.longTypeInfo);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);
@@ -2567,11 +2515,7 @@ public class TestVectorGroupByOperator {
        dataTypeInfo, "Key", TypeInfoFactory.stringTypeInfo);
 
     CompilationOpContext cCtx = new CompilationOpContext();
-
-    Operator<? extends OperatorDesc> groupByOp = OperatorFactory.get(cCtx, desc);
-
-    VectorGroupByOperator vgo =
-        (VectorGroupByOperator) Vectorizer.vectorizeGroupByOperator(groupByOp, ctx);
+    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);
 
     FakeCaptureOutputOperator out = FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
     vgo.initialize(hconf, null);

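Taken together, the TestVectorGroupByOperator hunks above restore one
construction pattern: the tests stop routing through OperatorFactory.get(...)
followed by Vectorizer.vectorizeGroupByOperator(...), and the
setVectorDesc(new VectorGroupByDesc()) / cast boilerplate disappears,
suggesting GroupByDesc.getVectorDesc() is once again typed to return a
VectorGroupByDesc that the descriptor creates itself. A minimal sketch of the
restored flow, condensed from the "+" lines (buildGroupByDescType, ctx, and
hconf are the test's own fixtures):

    GroupByDesc desc = buildGroupByDescType(ctx, "count",
        GenericUDAFEvaluator.Mode.FINAL, "A", TypeInfoFactory.longTypeInfo);
    // No (VectorGroupByDesc) cast needed after the revert.
    desc.getVectorDesc().setProcessingMode(ProcessingMode.GLOBAL);

    CompilationOpContext cCtx = new CompilationOpContext();
    // Direct construction replaces the Vectorizer-mediated path.
    VectorGroupByOperator vgo = new VectorGroupByOperator(cCtx, ctx, desc);

    FakeCaptureOutputOperator out =
        FakeCaptureOutputOperator.addCaptureOutputChild(cCtx, vgo);
    vgo.initialize(hconf, null);
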
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
index 614b1d1..779177a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
@@ -26,7 +26,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.util.VectorizedRowGroupGenUtil;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
-import org.apache.hadoop.hive.ql.plan.VectorSelectDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -53,7 +51,6 @@ public class TestVectorSelectOperator {
     public ValidatorVectorSelectOperator(CompilationOpContext ctx,
         VectorizationContext ctxt, OperatorDesc conf) throws HiveException {
       super(ctx, ctxt, conf);
-
       initializeOp(null);
     }
 
@@ -118,19 +115,6 @@ public class TestVectorSelectOperator {
     outputColNames.add("_col1");
     selDesc.setOutputColumnNames(outputColNames);
 
-    // CONSIDER unwinding ValidatorVectorSelectOperator as a subclass of VectorSelectOperator.
-    VectorSelectDesc vectorSelectDesc = new VectorSelectDesc();
-    selDesc.setVectorDesc(vectorSelectDesc);
-    List<ExprNodeDesc> selectColList = selDesc.getColList();
-    VectorExpression[] vectorSelectExprs = new VectorExpression[selectColList.size()];
-    for (int i = 0; i < selectColList.size(); i++) {
-      ExprNodeDesc expr = selectColList.get(i);
-      VectorExpression ve = vc.getVectorExpression(expr);
-      vectorSelectExprs[i] = ve;
-    }
-    vectorSelectDesc.setSelectExpressions(vectorSelectExprs);
-    vectorSelectDesc.setProjectedOutputColumns(new int[] {3, 2});
-
     ValidatorVectorSelectOperator vso = new ValidatorVectorSelectOperator(
         new CompilationOpContext(), vc, selDesc);
 

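In TestVectorSelectOperator the revert deletes the hand-built VectorSelectDesc
(the VectorExpression[] array and the projected output columns), which
suggests VectorSelectOperator once again derives its vector expressions from
the plain SelectDesc and the VectorizationContext handed to its constructor.
What remains, using the test's own names (vc carries the context, selDesc the
column list and output names):

    // The operator builds its own vector expressions from selDesc and vc;
    // no VectorSelectDesc wiring is required after the revert.
    ValidatorVectorSelectOperator vso = new ValidatorVectorSelectOperator(
        new CompilationOpContext(), vc, selDesc);
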
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
index ccd1059..3295372 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
@@ -96,8 +96,6 @@ public class TestVectorizer {
     outputColumnNames.add("_col0");
 
     GroupByDesc desc = new GroupByDesc();
-    desc.setVectorDesc(new VectorGroupByDesc());
-
     desc.setOutputColumnNames(outputColumnNames);
     ArrayList<AggregationDesc> aggDescList = new ArrayList<AggregationDesc>();
     aggDescList.add(aggDesc);
@@ -108,14 +106,13 @@ public class TestVectorizer {
     grpByKeys.add(colExprB);
     desc.setKeys(grpByKeys);
 
-    Operator<? extends OperatorDesc> gbyOp = OperatorFactory.get(new CompilationOpContext(), desc);
-
+    GroupByOperator gbyOp = new GroupByOperator(new CompilationOpContext());
+    gbyOp.setConf(desc);
     desc.setMode(GroupByDesc.Mode.HASH);
 
     Vectorizer v = new Vectorizer();
-    v.testSetCurrentBaseWork(new MapWork());
     Assert.assertTrue(v.validateMapWorkOperator(gbyOp, null, false));
-    VectorGroupByOperator vectorOp = (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext, false, null);
+    VectorGroupByOperator vectorOp = (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext, false);
     Assert.assertEquals(VectorUDAFSumLong.class, vectorOp.getAggregators()[0].getClass());
     VectorUDAFSumLong udaf = (VectorUDAFSumLong) vectorOp.getAggregators()[0];
     Assert.assertEquals(FuncAbsLongToLong.class, udaf.getInputExpression().getClass());
@@ -150,9 +147,8 @@ public class TestVectorizer {
     andExprDesc.setChildren(children3);
 
     Vectorizer v = new Vectorizer();
-    v.testSetCurrentBaseWork(new MapWork());
-    Assert.assertFalse(v.validateExprNodeDesc(andExprDesc, "test", VectorExpressionDescriptor.Mode.FILTER));
-    Assert.assertFalse(v.validateExprNodeDesc(andExprDesc, "test", VectorExpressionDescriptor.Mode.PROJECTION));
+    Assert.assertFalse(v.validateExprNodeDesc(andExprDesc, VectorExpressionDescriptor.Mode.FILTER));
+    Assert.assertFalse(v.validateExprNodeDesc(andExprDesc, VectorExpressionDescriptor.Mode.PROJECTION));
   }
 
   /**
@@ -200,7 +196,6 @@ public class TestVectorizer {
     map.setConf(mjdesc);
 
     Vectorizer vectorizer = new Vectorizer();
-    vectorizer.testSetCurrentBaseWork(new MapWork());
     Assert.assertTrue(vectorizer.validateMapWorkOperator(map, null, false));
   }
 
@@ -217,7 +212,6 @@ public class TestVectorizer {
       map.setConf(mjdesc);
 
       Vectorizer vectorizer = new Vectorizer();
-      vectorizer.testSetCurrentBaseWork(new MapWork());
       Assert.assertTrue(vectorizer.validateMapWorkOperator(map, null, false));
   }
 }

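The TestVectorizer hunks restore the narrower Vectorizer API: no
testSetCurrentBaseWork(new MapWork()) priming, validateExprNodeDesc without
the extra name argument, and a three-argument vectorizeOperator. Condensed
from the "+" lines (desc, vContext, and the assertions come from the test's
own fixtures):

    // Build the operator directly and attach its descriptor.
    GroupByOperator gbyOp = new GroupByOperator(new CompilationOpContext());
    gbyOp.setConf(desc);
    desc.setMode(GroupByDesc.Mode.HASH);

    Vectorizer v = new Vectorizer();
    Assert.assertTrue(v.validateMapWorkOperator(gbyOp, null, false));
    // vectorizeOperator drops its fourth parameter in this revert.
    VectorGroupByOperator vectorOp =
        (VectorGroupByOperator) v.vectorizeOperator(gbyOp, vContext, false);
    Assert.assertEquals(VectorUDAFSumLong.class,
        vectorOp.getAggregators()[0].getClass());
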
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q
index 48903d2..7e66cbc 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -38,7 +38,7 @@ alter table part_add_int_permute_select add columns(c int);
 
 insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -61,7 +61,7 @@ alter table part_add_int_string_permute_select add columns(c int, d string);
 
 insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -93,7 +93,7 @@ alter table part_change_string_group_double replace columns (insert_num int, c1
 
 insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
 
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
@@ -116,7 +116,7 @@ alter table part_change_date_group_string_group_date_timestamp replace columns(i
 
 insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
@@ -164,7 +164,7 @@ insert into table part_change_numeric_group_string_group_multi_ints_string_group
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
@@ -207,7 +207,7 @@ insert into table part_change_numeric_group_string_group_floating_string_group p
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
@@ -249,7 +249,7 @@ insert into table part_change_string_group_string_group_string partition(part=1)
           'filler', 'filler', 'filler',
           'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
@@ -299,7 +299,7 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
             1234.5678, 9876.543, 789.321,
            'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
@@ -330,7 +330,7 @@ alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace c
 
 insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
 
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
index 45afd9d..ac747e6 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -51,7 +51,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt'
 
 insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
 
- explain vectorization detail
+explain
 select insert_num,part,s1,b from part_change_various_various_struct1;
 
 select insert_num,part,s1,b from part_change_various_various_struct1;
@@ -111,7 +111,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt'
 
 insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s2 from part_add_various_various_struct2;
 
 select insert_num,part,b,s2 from part_add_various_various_struct2;
@@ -155,7 +155,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt'
 
 insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;
 
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
index b266a67..d3898a8 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -72,7 +72,7 @@ insert into table part_change_various_various_boolean_to_bigint partition(part=1
              bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, 
               'new' FROM schema_evolution_data;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
@@ -114,7 +114,7 @@ insert into table part_change_various_various_decimal_to_double partition(part=1
              double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1,
              'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
@@ -138,7 +138,7 @@ alter table part_change_various_various_timestamp replace columns (insert_num in
 
 insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
@@ -159,7 +159,7 @@ alter table part_change_various_various_date replace columns (insert_num int, c1
 
 insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
 
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
@@ -198,7 +198,7 @@ load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' over
 
 insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
index 866942e..ffaa07b 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
 SET hive.vectorized.use.vectorized.input.format=true;
@@ -36,7 +36,7 @@ alter table table_add_int_permute_select add columns(c int);
 
 insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -59,7 +59,7 @@ alter table table_add_int_string_permute_select add columns(c int, d string);
 
 insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -91,7 +91,7 @@ alter table table_change_string_group_double replace columns (insert_num int, c1
 
 insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
 
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
@@ -158,7 +158,7 @@ insert into table table_change_numeric_group_string_group_multi_ints_string_grou
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
@@ -201,7 +201,7 @@ insert into table table_change_numeric_group_string_group_floating_string_group
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
index 77c863a..6582035 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -39,7 +39,7 @@ alter table part_add_int_permute_select add columns(c int);
 
 insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -62,7 +62,7 @@ alter table part_add_int_string_permute_select add columns(c int, d string);
 
 insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -94,7 +94,7 @@ alter table part_change_string_group_double replace columns (insert_num int, c1
 
 insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
 
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
@@ -117,7 +117,7 @@ alter table part_change_date_group_string_group_date_timestamp replace columns(i
 
 insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
@@ -165,7 +165,7 @@ insert into table part_change_numeric_group_string_group_multi_ints_string_group
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
@@ -208,7 +208,7 @@ insert into table part_change_numeric_group_string_group_floating_string_group p
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
@@ -250,7 +250,7 @@ insert into table part_change_string_group_string_group_string partition(part=1)
           'filler', 'filler', 'filler',
           'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
@@ -300,7 +300,7 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
             1234.5678, 9876.543, 789.321,
            'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
@@ -331,7 +331,7 @@ alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace c
 
 insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
 
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
index 7eb72e0..e38a01e 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -53,7 +53,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt'
 
 insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,s1,b from part_change_various_various_struct1;
 
 select insert_num,part,s1,b from part_change_various_various_struct1;
@@ -113,7 +113,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt'
 
 insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s2 from part_add_various_various_struct2;
 
 select insert_num,part,b,s2 from part_add_various_various_struct2;
@@ -157,7 +157,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt'
 
 insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;
 
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
index d5c01cd..c9d90c3 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -74,7 +74,7 @@ insert into table part_change_various_various_boolean_to_bigint partition(part=1
              bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, 
               'new' FROM schema_evolution_data;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
@@ -116,7 +116,7 @@ insert into table part_change_various_various_decimal_to_double partition(part=1
              double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1,
              'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
@@ -140,7 +140,7 @@ alter table part_change_various_various_timestamp replace columns (insert_num in
 
 insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
@@ -161,7 +161,7 @@ alter table part_change_various_various_date replace columns (insert_num int, c1
 
 insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
 
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
@@ -200,7 +200,7 @@ load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' over
 
 insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
index bbf03af..7785f87 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
 SET hive.vectorized.use.vectorized.input.format=false;
@@ -38,7 +38,7 @@ alter table table_add_int_permute_select add columns(c int);
 
 insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -61,7 +61,7 @@ alter table table_add_int_string_permute_select add columns(c int, d string);
 
 insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -93,7 +93,7 @@ alter table table_change_string_group_double replace columns (insert_num int, c1
 
 insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
 
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
@@ -160,7 +160,7 @@ insert into table table_change_numeric_group_string_group_multi_ints_string_grou
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
@@ -203,7 +203,7 @@ insert into table table_change_numeric_group_string_group_floating_string_group
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index ab7a103..13a8b35 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -36,7 +36,7 @@ POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION [(unique_timestamps)
 POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE [(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, comment:null), ]
 tsval	tsval
 PREHOOK: query: -- interval year-month arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '2-2' year to month,
@@ -49,7 +49,7 @@ from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
 POSTHOOK: query: -- interval year-month arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '2-2' year to month,
@@ -62,10 +62,6 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -83,61 +79,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 2-2) (type: date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 2, 3, 4, 5, 6, 7]
-                        selectExpressions: DateColSubtractIntervalYearMonthScalar(col 0, val 2-2) -> 2:long, DateColSubtractIntervalYearMonthScalar(col 0, val -2-2) -> 3:long, DateColAddIntervalYearMonthScalar(col 0, val 2-2) -> 4:long, DateColAddIntervalYearMonthScalar(col 0, val -2-2) -> 5:long, IntervalYearMonthScalarAddDateColumn(val -2-2, col 0) -> 6:long, IntervalYearMonthScalarAddDateColumn(val 2-2, col 0) -> 7:long
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: date), VALUE._col4 (type: date), VALUE._col5 (type: date)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -227,7 +188,7 @@ dateval	c1	c2	c3	c4	c5	c6
 9075-06-13	9073-04-13	9077-08-13	9077-08-13	9073-04-13	9073-04-13	9077-08-13
 9209-11-11	9207-09-11	9212-01-11	9212-01-11	9207-09-11	9207-09-11	9212-01-11
 9403-01-09	9400-11-09	9405-03-09	9405-03-09	9400-11-09	9400-11-09	9405-03-09
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   dateval,
   dateval - date '1999-06-07',
@@ -236,7 +197,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   dateval,
   dateval - date '1999-06-07',
@@ -246,10 +207,6 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -267,61 +224,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 1999-06-07) (type: interval_day_time), (1999-06-07 - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 2, 3, 4]
-                        selectExpressions: DateColSubtractDateScalar(col 0, val 1999-06-07 00:00:00.0) -> 2:timestamp, DateScalarSubtractDateColumn(val 1999-06-07 00:00:00.0, col 0) -> 3:timestamp, DateColSubtractDateColumn(col 0, col 0) -> 4:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -405,7 +327,7 @@ dateval	c1	c2	c3
 9075-06-13	2584462 00:00:00.000000000	-2584462 00:00:00.000000000	0 00:00:00.000000000
 9209-11-11	2633556 01:00:00.000000000	-2633556 01:00:00.000000000	0 00:00:00.000000000
 9403-01-09	2704106 01:00:00.000000000	-2704106 01:00:00.000000000	0 00:00:00.000000000
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   tsval,
   tsval - interval '2-2' year to month,
@@ -417,7 +339,7 @@ select
 from interval_arithmetic_1
 order by tsval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   tsval,
   tsval - interval '2-2' year to month,
@@ -430,10 +352,6 @@ from interval_arithmetic_1
 order by tsval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -451,61 +369,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: tsval (type: timestamp), (tsval - 2-2) (type: timestamp), (tsval - -2-2) (type: timestamp), (tsval + 2-2) (type: timestamp), (tsval + -2-2) (type: timestamp), (-2-2 + tsval) (type: timestamp), (2-2 + tsval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7]
-                        selectExpressions: TimestampColSubtractIntervalYearMonthScalar(col 1, val 2-2) -> 2:timestamp, TimestampColSubtractIntervalYearMonthScalar(col 1, val -2-2) -> 3:timestamp, TimestampColAddIntervalYearMonthScalar(col 1, val 2-2) -> 4:timestamp, TimestampColAddIntervalYearMonthScalar(col 1, val -2-2) -> 5:timestamp, IntervalYearMonthScalarAddTimestampColumn(val -2-2, col 1) -> 6:timestamp, IntervalYearMonthScalarAddTimestampColumn(val 2-2, col 1) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -595,7 +478,7 @@ tsval	c1	c2	c3	c4	c5	c6
 9075-06-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9077-08-13 16:20:09.218517797	9077-08-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9077-08-13 16:20:09.218517797
 9209-11-11 04:08:58.223768453	9207-09-11 05:08:58.223768453	9212-01-11 04:08:58.223768453	9212-01-11 04:08:58.223768453	9207-09-11 05:08:58.223768453	9207-09-11 05:08:58.223768453	9212-01-11 04:08:58.223768453
 9403-01-09 18:12:33.547	9400-11-09 18:12:33.547	9405-03-09 18:12:33.547	9405-03-09 18:12:33.547	9400-11-09 18:12:33.547	9400-11-09 18:12:33.547	9405-03-09 18:12:33.547
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
@@ -603,7 +486,7 @@ from interval_arithmetic_1
 order by interval '2-2' year to month + interval '3-3' year to month
 limit 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
@@ -612,10 +495,6 @@ order by interval '2-2' year to month + interval '3-3' year to month
 limit 2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -633,64 +512,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: []
                     Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                       TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
-                    selectExpressions: ConstantVectorExpression(val 65) -> 0:long, ConstantVectorExpression(val -13) -> 1:long
                 Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 2
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -725,7 +566,7 @@ c0	c1
 5-5	-1-1
 5-5	-1-1
 PREHOOK: query: -- interval day-time arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '99 11:22:33.123456789' day to second,
@@ -738,7 +579,7 @@ from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
 POSTHOOK: query: -- interval day-time arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '99 11:22:33.123456789' day to second,
@@ -751,10 +592,6 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -772,61 +609,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 99 11:22:33.123456789) (type: timestamp), (dateval - -99 11:22:33.123456789) (type: timestamp), (dateval + 99 11:22:33.123456789) (type: timestamp), (dateval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + dateval) (type: timestamp), (99 11:22:33.123456789 + dateval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 2, 3, 4, 5, 6, 7]
-                        selectExpressions: DateColSubtractIntervalDayTimeScalar(col 0, val 99 11:22:33.123456789) -> 2:timestamp, DateColSubtractIntervalDayTimeScalar(col 0, val -99 11:22:33.123456789) -> 3:timestamp, DateColAddIntervalDayTimeScalar(col 0, val 99 11:22:33.123456789) -> 4:timestamp, DateColAddIntervalDayTimeScalar(col 0, val -99 11:22:33.123456789) -> 5:timestamp, IntervalDayTimeScalarAddDateColumn(val -99 11:22:33.123456789, col 0) -> 6:timestamp, IntervalDayTimeScalarAddDateColumn(val 99 11:22:33.123456789, col 0) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -916,7 +718,7 @@ dateval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13	9075-03-05 11:37:26.876543211	9075-09-20 11:22:33.123456789	9075-09-20 11:22:33.123456789	9075-03-05 11:37:26.876543211	9075-03-05 11:37:26.876543211	9075-09-20 11:22:33.123456789
 9209-11-11	9209-08-03 13:37:26.876543211	9210-02-18 11:22:33.123456789	9210-02-18 11:22:33.123456789	9209-08-03 13:37:26.876543211	9209-08-03 13:37:26.876543211	9210-02-18 11:22:33.123456789
 9403-01-09	9402-10-01 13:37:26.876543211	9403-04-18 12:22:33.123456789	9403-04-18 12:22:33.123456789	9402-10-01 13:37:26.876543211	9402-10-01 13:37:26.876543211	9403-04-18 12:22:33.123456789
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   dateval,
   tsval,
@@ -926,7 +728,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   dateval,
   tsval,
@@ -937,10 +739,6 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -958,61 +756,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), tsval (type: timestamp), (dateval - tsval) (type: interval_day_time), (tsval - dateval) (type: interval_day_time), (tsval - tsval) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4]
-                        selectExpressions: DateColSubtractTimestampColumn(col 0, col 1) -> 2:interval_day_time, TimestampColSubtractDateColumn(col 1, col 0) -> 3:interval_day_time, TimestampColSubtractTimestampColumn(col 1, col 1) -> 4:interval_day_time
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: timestamp), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1098,7 +861,7 @@ dateval	tsval	c2	c3	c4
 9075-06-13	9075-06-13 16:20:09.218517797	-0 16:20:09.218517797	0 16:20:09.218517797	0 00:00:00.000000000
 9209-11-11	9209-11-11 04:08:58.223768453	-0 04:08:58.223768453	0 04:08:58.223768453	0 00:00:00.000000000
 9403-01-09	9403-01-09 18:12:33.547	-0 18:12:33.547000000	0 18:12:33.547000000	0 00:00:00.000000000
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   tsval,
   tsval - interval '99 11:22:33.123456789' day to second,
@@ -1110,7 +873,7 @@ select
 from interval_arithmetic_1
 order by tsval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   tsval,
   tsval - interval '99 11:22:33.123456789' day to second,
@@ -1123,10 +886,6 @@ from interval_arithmetic_1
 order by tsval
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1144,61 +903,26 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: tsval (type: timestamp), (tsval - 99 11:22:33.123456789) (type: timestamp), (tsval - -99 11:22:33.123456789) (type: timestamp), (tsval + 99 11:22:33.123456789) (type: timestamp), (tsval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + tsval) (type: timestamp), (99 11:22:33.123456789 + tsval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7]
-                        selectExpressions: TimestampColSubtractIntervalDayTimeScalar(col 1, val 99 11:22:33.123456789) -> 2:timestamp, TimestampColSubtractIntervalDayTimeScalar(col 1, val -99 11:22:33.123456789) -> 3:timestamp, TimestampColAddIntervalDayTimeScalar(col 1, val 99 11:22:33.123456789) -> 4:timestamp, TimestampColAddIntervalDayTimeScalar(col 1, val -99 11:22:33.123456789) -> 5:timestamp, IntervalDayTimeScalarAddTimestampColumn(val -99 11:22:33.123456789, col 1) -> 6:timestamp, IntervalDayTimeScalarAddTimestampColumn(val 99 11:22:33.123456789, col 1) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1288,14 +1012,14 @@ tsval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13 16:20:09.218517797	9075-03-06 03:57:36.095061008	9075-09-21 03:42:42.341974586	9075-09-21 03:42:42.341974586	9075-03-06 03:57:36.095061008	9075-03-06 03:57:36.095061008	9075-09-21 03:42:42.341974586
 9209-11-11 04:08:58.223768453	9209-08-03 17:46:25.100311664	9210-02-18 15:31:31.347225242	9210-02-18 15:31:31.347225242	9209-08-03 17:46:25.100311664	9209-08-03 17:46:25.100311664	9210-02-18 15:31:31.347225242
 9403-01-09 18:12:33.547	9402-10-02 07:50:00.423543211	9403-04-19 06:35:06.670456789	9403-04-19 06:35:06.670456789	9402-10-02 07:50:00.423543211	9402-10-02 07:50:00.423543211	9403-04-19 06:35:06.670456789
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
 from interval_arithmetic_1
 limit 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
@@ -1303,10 +1027,6 @@ from interval_arithmetic_1
 limit 2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1321,29 +1041,15 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: 109 20:30:40.246913578 (type: interval_day_time), 89 02:14:26.000000000 (type: interval_day_time)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3]
-                        selectExpressions: ConstantVectorExpression(val 109 20:30:40.246913578) -> 2:interval_day_time, ConstantVectorExpression(val 89 02:14:26.000000000) -> 3:interval_day_time
                     Statistics: Num rows: 50 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 2
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
                       Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1351,14 +1057,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

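For reference while reading the hunks above, a minimal HiveQL sketch of the
constant-folding case those plans exercise (query shape and the table name
interval_arithmetic_1 are taken from the diff; the folded literals 5-5 and
-1-1 match the ConstantVectorExpression values in the removed plan output):

    select
      interval '2-2' year to month + interval '3-3' year to month,  -- folds to 5-5
      interval '2-2' year to month - interval '3-3' year to month   -- folds to -1-1
    from interval_arithmetic_1
    limit 2;
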
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
index 002d011..0bc0e4c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
@@ -136,7 +136,7 @@ POSTHOOK: Lineage: vectortab_b_1korc.si SIMPLE [(vectortab_b_1k)vectortab_b_1k.F
 POSTHOOK: Lineage: vectortab_b_1korc.t SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab_b_1korc.ts SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab_b_1korc.ts2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
    v1.s,
    v2.s,
@@ -158,7 +158,7 @@ join
       on v1.intrvl1 = v2.intrvl2 
       and v1.s = v2.s
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
    v1.s,
    v2.s,
@@ -180,10 +180,6 @@ join
       on v1.intrvl1 = v2.intrvl2 
       and v1.s = v2.s
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -201,24 +197,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab_a_1korc
                   Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 8) -> boolean, SelectColumnIsNotNull(col 14)(children: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp) -> boolean) -> boolean
                     predicate: (s is not null and (dt - CAST( ts AS DATE)) is not null) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: s (type: string), (dt - CAST( ts AS DATE)) (type: interval_day_time)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 14]
-                          selectExpressions: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp
                       Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -226,10 +210,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: string), _col1 (type: interval_day_time)
                           1 _col0 (type: string), _col1 (type: interval_day_time)
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerBigOnlyMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1, _col2
                         input vertices:
                           1 Map 2
@@ -237,16 +217,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: string), _col2 (type: string), _col1 (type: interval_day_time)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [8, 8, 14]
                           Statistics: Num rows: 1100 Data size: 506290 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1100 Data size: 506290 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -254,57 +227,25 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: vectortab_b_1korc
                   Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 8) -> boolean, SelectColumnIsNotNull(col 14)(children: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp) -> boolean) -> boolean
                     predicate: (s is not null and (dt - CAST( ts AS DATE)) is not null) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: s (type: string), (dt - CAST( ts AS DATE)) (type: interval_day_time)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 14]
-                          selectExpressions: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp
                       Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: interval_day_time)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: interval_day_time)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator


[51/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

This reverts commit 16d28b343b76c998b8fdbd8a91bae07ac82357de.
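
The user-visible effect, as the .q.out hunks show, is on the EXPLAIN syntax the
tests use. A minimal sketch of the before/after forms (both taken from the
PREHOOK/POSTHOOK query lines in the diff; the query itself is illustrative):

    -- before this revert (HIVE-11394 syntax):
    explain vectorization expression
    select interval '2-2' year to month + interval '3-3' year to month
    from interval_arithmetic_1;

    -- after this revert, only plain EXPLAIN remains:
    explain
    select interval '2-2' year to month + interval '3-3' year to month
    from interval_arithmetic_1;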


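The removed "PLAN VECTORIZATION" / "Map Vectorization" / "Reduce Vectorization"
blocks reported which configuration gates were met; the settings themselves are
ordinary session properties and are unaffected by the revert, only their EXPLAIN
reporting goes away. A minimal sketch of setting the gates named in the removed
output (property names copied verbatim from the diff; the values are
illustrative, since defaults vary by release):

    set hive.vectorized.execution.enabled=true;
    set hive.vectorized.execution.reduce.enabled=true;
    set hive.vectorized.execution.reducesink.new.enabled=true;
    set hive.vectorized.execution.mapjoin.native.enabled=true;
    set hive.vectorized.use.vectorized.input.format=true;
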
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad6ce078
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad6ce078
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad6ce078

Branch: refs/heads/hive-14535
Commit: ad6ce0781a4e68fad2960c1053c325753a9504db
Parents: 36e810f
Author: Matt McCline <mm...@hortonworks.com>
Authored: Mon Oct 17 13:31:42 2016 -0700
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Mon Oct 17 13:31:42 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hive/common/util/DateUtils.java  |    20 -
 .../ColumnArithmeticColumn.txt                  |     7 +-
 .../ColumnArithmeticColumnDecimal.txt           |     5 -
 .../ColumnArithmeticColumnWithConvert.txt       |   173 +
 .../ColumnArithmeticScalar.txt                  |     5 -
 .../ColumnArithmeticScalarDecimal.txt           |     5 -
 .../ColumnArithmeticScalarWithConvert.txt       |   150 +
 .../ExpressionTemplates/ColumnCompareColumn.txt |     5 -
 .../ExpressionTemplates/ColumnCompareScalar.txt |     5 -
 .../ExpressionTemplates/ColumnDivideColumn.txt  |     5 -
 .../ColumnDivideColumnDecimal.txt               |     5 -
 .../ExpressionTemplates/ColumnDivideScalar.txt  |     5 -
 .../ColumnDivideScalarDecimal.txt               |     5 -
 .../ExpressionTemplates/ColumnUnaryFunc.txt     |     5 -
 .../ExpressionTemplates/ColumnUnaryMinus.txt    |     5 -
 ...eColumnArithmeticIntervalYearMonthColumn.txt |     5 -
 ...eColumnArithmeticIntervalYearMonthScalar.txt |     5 -
 .../DateColumnArithmeticTimestampColumn.txt     |     5 -
 .../DateColumnArithmeticTimestampScalar.txt     |     5 -
 ...eScalarArithmeticIntervalYearMonthColumn.txt |     5 -
 .../DateScalarArithmeticTimestampColumn.txt     |     5 -
 .../DecimalColumnUnaryFunc.txt                  |     5 -
 .../ExpressionTemplates/FilterColumnBetween.txt |     7 +-
 .../FilterColumnCompareColumn.txt               |     9 +-
 .../FilterColumnCompareScalar.txt               |     9 +-
 .../FilterDecimalColumnBetween.txt              |     5 -
 .../FilterDecimalColumnCompareDecimalColumn.txt |     5 -
 .../FilterDecimalColumnCompareDecimalScalar.txt |     5 -
 .../FilterDecimalScalarCompareDecimalColumn.txt |     5 -
 ...erLongDoubleColumnCompareTimestampColumn.txt |     5 -
 ...erLongDoubleScalarCompareTimestampColumn.txt |     5 -
 .../FilterScalarCompareColumn.txt               |     9 +-
 .../FilterStringColumnBetween.txt               |     9 +-
 ...tringGroupColumnCompareStringGroupColumn.txt |     5 -
 ...gGroupColumnCompareStringGroupScalarBase.txt |     7 -
 ...gGroupScalarCompareStringGroupColumnBase.txt |     8 -
 .../FilterTimestampColumnBetween.txt            |     5 -
 ...erTimestampColumnCompareLongDoubleColumn.txt |     5 -
 ...erTimestampColumnCompareLongDoubleScalar.txt |     5 -
 ...terTimestampColumnCompareTimestampColumn.txt |     5 -
 ...terTimestampColumnCompareTimestampScalar.txt |     5 -
 ...erTimestampScalarCompareLongDoubleColumn.txt |     5 -
 ...terTimestampScalarCompareTimestampColumn.txt |     5 -
 .../FilterTruncStringColumnBetween.txt          |    10 +-
 .../ExpressionTemplates/IfExprColumnScalar.txt  |     5 -
 .../ExpressionTemplates/IfExprScalarColumn.txt  |     5 -
 .../ExpressionTemplates/IfExprScalarScalar.txt  |     5 -
 ...ervalYearMonthColumnArithmeticDateColumn.txt |     5 -
 ...ervalYearMonthColumnArithmeticDateScalar.txt |     5 -
 ...YearMonthColumnArithmeticTimestampColumn.txt |     5 -
 ...YearMonthColumnArithmeticTimestampScalar.txt |     5 -
 ...ervalYearMonthScalarArithmeticDateColumn.txt |     5 -
 ...YearMonthScalarArithmeticTimestampColumn.txt |     5 -
 .../LongDoubleColumnCompareTimestampColumn.txt  |     5 -
 .../LongDoubleColumnCompareTimestampScalar.txt  |     4 -
 .../LongDoubleScalarCompareTimestampColumn.txt  |     5 -
 .../ScalarArithmeticColumn.txt                  |     5 -
 .../ScalarArithmeticColumnDecimal.txt           |     5 -
 .../ScalarArithmeticColumnWithConvert.txt       |   163 +
 .../ExpressionTemplates/ScalarCompareColumn.txt |     5 -
 .../ExpressionTemplates/ScalarDivideColumn.txt  |     5 -
 .../ScalarDivideColumnDecimal.txt               |     5 -
 ...tringGroupColumnCompareStringGroupColumn.txt |     5 -
 ...gGroupColumnCompareStringGroupScalarBase.txt |     6 -
 ...tringGroupColumnCompareTruncStringScalar.txt |     7 -
 ...gGroupScalarCompareStringGroupColumnBase.txt |     7 -
 .../TimestampColumnArithmeticDateColumn.txt     |     5 -
 .../TimestampColumnArithmeticDateScalar.txt     |     5 -
 ...pColumnArithmeticIntervalYearMonthColumn.txt |     5 -
 ...pColumnArithmeticIntervalYearMonthScalar.txt |     5 -
 ...TimestampColumnArithmeticTimestampColumn.txt |     5 -
 ...TimestampColumnArithmeticTimestampScalar.txt |     5 -
 .../TimestampColumnCompareLongDoubleColumn.txt  |     5 -
 .../TimestampColumnCompareLongDoubleScalar.txt  |     5 -
 .../TimestampColumnCompareTimestampColumn.txt   |     5 -
 .../TimestampColumnCompareTimestampScalar.txt   |     5 -
 .../TimestampScalarArithmeticDateColumn.txt     |     5 -
 ...pScalarArithmeticIntervalYearMonthColumn.txt |     5 -
 ...TimestampScalarArithmeticTimestampColumn.txt |     5 -
 .../TimestampScalarCompareTimestampColumn.txt   |     5 -
 ...runcStringScalarCompareStringGroupColumn.txt |     2 -
 .../UDAFTemplates/VectorUDAFAvg.txt             |     6 -
 .../UDAFTemplates/VectorUDAFMinMax.txt          |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |     6 -
 .../VectorUDAFMinMaxIntervalDayTime.txt         |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |     6 -
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |     6 -
 .../UDAFTemplates/VectorUDAFSum.txt             |     6 -
 .../UDAFTemplates/VectorUDAFVar.txt             |     6 -
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |     6 -
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   252 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |     2 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |     5 -
 .../hadoop/hive/ql/exec/SelectOperator.java     |     1 +
 .../hive/ql/exec/persistence/MapJoinKey.java    |     9 +-
 .../hive/ql/exec/spark/HashTableLoader.java     |     2 +-
 .../ql/exec/vector/VectorColumnMapping.java     |    12 +-
 .../ql/exec/vector/VectorColumnOrderedMap.java  |    33 +-
 .../exec/vector/VectorColumnOutputMapping.java  |     7 +-
 .../exec/vector/VectorColumnSourceMapping.java  |     7 +-
 .../hive/ql/exec/vector/VectorCopyRow.java      |     3 +-
 .../ql/exec/vector/VectorFilterOperator.java    |     4 +-
 .../ql/exec/vector/VectorGroupByOperator.java   |    21 +-
 .../ql/exec/vector/VectorSelectOperator.java    |    33 +-
 .../ql/exec/vector/VectorizationContext.java    |    38 +-
 .../exec/vector/VectorizationContextRegion.java |     4 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |     3 -
 ...AbstractFilterStringColLikeStringScalar.java |     5 -
 .../CastBooleanToCharViaLongToChar.java         |     5 -
 .../CastBooleanToVarCharViaLongToVarChar.java   |     5 -
 .../exec/vector/expressions/CastDateToChar.java |     4 -
 .../vector/expressions/CastDateToVarChar.java   |     5 -
 .../vector/expressions/CastDecimalToChar.java   |     5 -
 .../expressions/CastDecimalToDecimal.java       |     5 -
 .../expressions/CastDecimalToVarChar.java       |     5 -
 .../expressions/CastDoubleToTimestamp.java      |     5 -
 .../exec/vector/expressions/CastLongToChar.java |     5 -
 .../exec/vector/expressions/CastLongToDate.java |     5 -
 .../vector/expressions/CastLongToTimestamp.java |     5 -
 .../vector/expressions/CastLongToVarChar.java   |     5 -
 .../CastMillisecondsLongToTimestamp.java        |     5 -
 .../expressions/CastStringGroupToChar.java      |     4 -
 .../expressions/CastStringGroupToVarChar.java   |     5 -
 .../vector/expressions/CastStringToDate.java    |     5 -
 .../vector/expressions/CastStringToDecimal.java |     5 -
 .../CastStringToIntervalDayTime.java            |     5 -
 .../CastStringToIntervalYearMonth.java          |     5 -
 .../expressions/CastTimestampToBoolean.java     |     7 +-
 .../expressions/CastTimestampToDouble.java      |     5 -
 .../vector/expressions/CastTimestampToLong.java |     7 +-
 .../ql/exec/vector/expressions/ColAndCol.java   |     5 -
 .../ql/exec/vector/expressions/ColOrCol.java    |     5 -
 .../expressions/ConstantVectorExpression.java   |    33 -
 .../expressions/DateColSubtractDateColumn.java  |     4 -
 .../expressions/DateColSubtractDateScalar.java  |     5 -
 .../DateScalarSubtractDateColumn.java           |     5 -
 .../vector/expressions/DecimalColumnInList.java |     7 -
 .../expressions/DecimalToStringUnaryUDF.java    |     5 -
 .../vector/expressions/DoubleColumnInList.java  |     7 -
 .../vector/expressions/FilterColAndScalar.java  |     5 -
 .../vector/expressions/FilterColOrScalar.java   |     5 -
 .../expressions/FilterDecimalColumnInList.java  |     7 -
 .../expressions/FilterDoubleColumnInList.java   |     6 -
 .../vector/expressions/FilterExprAndExpr.java   |     8 -
 .../vector/expressions/FilterExprOrExpr.java    |     6 -
 .../expressions/FilterLongColumnInList.java     |     7 -
 .../expressions/FilterScalarAndColumn.java      |     5 -
 .../expressions/FilterScalarOrColumn.java       |     5 -
 .../expressions/FilterStringColumnInList.java   |     7 -
 .../expressions/FilterStructColumnInList.java   |     9 -
 .../FilterTimestampColumnInList.java            |     7 -
 .../vector/expressions/FuncDecimalToDouble.java |     5 -
 .../vector/expressions/FuncDecimalToLong.java   |     5 -
 .../expressions/FuncDecimalToTimestamp.java     |     5 -
 .../vector/expressions/FuncDoubleToDecimal.java |     4 -
 .../vector/expressions/FuncLongToDecimal.java   |     4 -
 .../vector/expressions/FuncLongToString.java    |     4 -
 .../FuncRoundWithNumDigitsDecimalToDecimal.java |     4 -
 .../expressions/FuncTimestampToDecimal.java     |     5 -
 .../vector/expressions/FuncTimestampToLong.java |     5 -
 .../vector/expressions/IdentityExpression.java  |     5 -
 .../IfExprDoubleColumnDoubleColumn.java         |     5 -
 .../IfExprIntervalDayTimeColumnColumn.java      |     5 -
 .../IfExprIntervalDayTimeColumnScalar.java      |     5 -
 .../IfExprIntervalDayTimeScalarColumn.java      |     5 -
 .../IfExprIntervalDayTimeScalarScalar.java      |     5 -
 .../expressions/IfExprLongColumnLongColumn.java |     5 -
 ...fExprStringGroupColumnStringGroupColumn.java |     5 -
 .../IfExprStringGroupColumnStringScalar.java    |     7 -
 .../IfExprStringScalarStringGroupColumn.java    |     7 -
 .../IfExprStringScalarStringScalar.java         |     7 -
 .../IfExprTimestampColumnColumnBase.java        |     5 -
 .../IfExprTimestampColumnScalarBase.java        |     6 -
 .../IfExprTimestampScalarColumnBase.java        |     6 -
 .../IfExprTimestampScalarScalarBase.java        |     6 -
 .../ql/exec/vector/expressions/IsNotNull.java   |     5 -
 .../hive/ql/exec/vector/expressions/IsNull.java |     5 -
 .../expressions/LongColDivideLongColumn.java    |     5 -
 .../expressions/LongColDivideLongScalar.java    |     5 -
 .../expressions/LongColEqualLongColumn.java     |     5 -
 .../expressions/LongColEqualLongScalar.java     |     4 -
 .../LongColGreaterEqualLongColumn.java          |     5 -
 .../LongColGreaterEqualLongScalar.java          |     5 -
 .../expressions/LongColGreaterLongColumn.java   |     5 -
 .../expressions/LongColGreaterLongScalar.java   |     5 -
 .../expressions/LongColLessEqualLongColumn.java |     5 -
 .../expressions/LongColLessEqualLongScalar.java |     5 -
 .../expressions/LongColLessLongColumn.java      |     5 -
 .../expressions/LongColLessLongScalar.java      |     5 -
 .../expressions/LongColNotEqualLongColumn.java  |     5 -
 .../expressions/LongColNotEqualLongScalar.java  |     5 -
 .../vector/expressions/LongColumnInList.java    |     6 -
 .../expressions/LongScalarDivideLongColumn.java |     5 -
 .../expressions/LongScalarEqualLongColumn.java  |     4 -
 .../LongScalarGreaterEqualLongColumn.java       |     5 -
 .../LongScalarGreaterLongColumn.java            |     5 -
 .../LongScalarLessEqualLongColumn.java          |     4 -
 .../expressions/LongScalarLessLongColumn.java   |     5 -
 .../LongScalarNotEqualLongColumn.java           |     5 -
 .../expressions/LongToStringUnaryUDF.java       |     5 -
 .../expressions/MathFuncDoubleToDouble.java     |     7 +-
 .../expressions/MathFuncLongToDouble.java       |     5 -
 .../vector/expressions/MathFuncLongToLong.java  |     7 +-
 .../hive/ql/exec/vector/expressions/NotCol.java |     5 -
 .../expressions/PosModDoubleToDouble.java       |     5 -
 .../vector/expressions/PosModLongToLong.java    |     5 -
 .../RoundWithNumDigitsDoubleToDouble.java       |     5 -
 .../vector/expressions/SelectColumnIsFalse.java |     4 -
 .../expressions/SelectColumnIsNotNull.java      |     5 -
 .../vector/expressions/SelectColumnIsNull.java  |     5 -
 .../vector/expressions/SelectColumnIsTrue.java  |     5 -
 .../SelectStringColLikeStringScalar.java        |    10 +-
 .../vector/expressions/StringColumnInList.java  |     5 -
 .../StringGroupColConcatStringScalar.java       |     7 -
 .../expressions/StringGroupConcatColCol.java    |     5 -
 .../exec/vector/expressions/StringLength.java   |     4 -
 .../StringScalarConcatStringGroupCol.java       |     7 -
 .../expressions/StringSubstrColStart.java       |     5 -
 .../expressions/StringSubstrColStartLen.java    |     5 -
 .../exec/vector/expressions/StringUnaryUDF.java |     5 -
 .../expressions/StringUnaryUDFDirect.java       |     4 -
 .../vector/expressions/StructColumnInList.java  |     8 -
 .../expressions/TimestampColumnInList.java      |     6 -
 .../expressions/TimestampToStringUnaryUDF.java  |     4 -
 .../exec/vector/expressions/VectorCoalesce.java |     5 -
 .../ql/exec/vector/expressions/VectorElt.java   |     7 -
 .../vector/expressions/VectorExpression.java    |    44 +-
 .../expressions/VectorUDFDateAddColCol.java     |     5 -
 .../expressions/VectorUDFDateAddColScalar.java  |     5 -
 .../expressions/VectorUDFDateAddScalarCol.java  |     5 -
 .../expressions/VectorUDFDateDiffColCol.java    |     5 -
 .../expressions/VectorUDFDateDiffColScalar.java |     6 -
 .../expressions/VectorUDFDateDiffScalarCol.java |     5 -
 .../VectorUDFTimestampFieldDate.java            |    10 -
 .../VectorUDFTimestampFieldString.java          |     9 -
 .../VectorUDFTimestampFieldTimestamp.java       |     9 -
 .../aggregates/VectorAggregateExpression.java   |    19 -
 .../aggregates/VectorUDAFAvgDecimal.java        |     6 -
 .../aggregates/VectorUDAFAvgTimestamp.java      |     6 -
 .../expressions/aggregates/VectorUDAFCount.java |     6 -
 .../aggregates/VectorUDAFCountMerge.java        |     6 -
 .../aggregates/VectorUDAFCountStar.java         |     7 -
 .../aggregates/VectorUDAFStdPopTimestamp.java   |     6 -
 .../aggregates/VectorUDAFStdSampTimestamp.java  |     6 -
 .../aggregates/VectorUDAFSumDecimal.java        |     6 -
 .../aggregates/VectorUDAFVarPopTimestamp.java   |     6 -
 .../aggregates/VectorUDAFVarSampTimestamp.java  |     6 -
 .../mapjoin/VectorMapJoinCommonOperator.java    |   363 +-
 .../VectorMapJoinInnerBigOnlyLongOperator.java  |    11 +-
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java |    15 +-
 ...VectorMapJoinInnerBigOnlyStringOperator.java |    11 +-
 .../mapjoin/VectorMapJoinInnerLongOperator.java |    11 +-
 .../VectorMapJoinInnerMultiKeyOperator.java     |    15 +-
 .../VectorMapJoinInnerStringOperator.java       |    11 +-
 .../VectorMapJoinLeftSemiLongOperator.java      |    11 +-
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  |    15 +-
 .../VectorMapJoinLeftSemiStringOperator.java    |    11 +-
 .../mapjoin/VectorMapJoinOuterLongOperator.java |    11 +-
 .../VectorMapJoinOuterMultiKeyOperator.java     |    15 +-
 .../VectorMapJoinOuterStringOperator.java       |    11 +-
 .../fast/VectorMapJoinFastTableContainer.java   |     2 +-
 .../VectorMapJoinOptimizedCreateHashTable.java  |     2 +-
 .../VectorReduceSinkCommonOperator.java         |     2 +-
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |     5 -
 .../hive/ql/optimizer/physical/Vectorizer.java  |  1458 +--
 .../ql/optimizer/physical/VectorizerReason.java |   123 -
 .../hive/ql/parse/ExplainConfiguration.java     |    39 -
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    38 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     5 -
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    28 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |     6 -
 .../hive/ql/plan/AbstractOperatorDesc.java      |    12 -
 .../hadoop/hive/ql/plan/AbstractVectorDesc.java |    14 -
 .../hadoop/hive/ql/plan/AppMasterEventDesc.java |    24 -
 .../apache/hadoop/hive/ql/plan/BaseWork.java    |   197 +-
 .../org/apache/hadoop/hive/ql/plan/Explain.java |    29 -
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |    13 -
 .../apache/hadoop/hive/ql/plan/FetchWork.java   |    45 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |    17 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |    28 -
 .../apache/hadoop/hive/ql/plan/GroupByDesc.java |    84 +-
 .../hadoop/hive/ql/plan/HashTableSinkDesc.java  |     1 -
 .../apache/hadoop/hive/ql/plan/LimitDesc.java   |    18 -
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |   218 +-
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |    99 +-
 .../hadoop/hive/ql/plan/MapredLocalWork.java    |     4 +-
 .../apache/hadoop/hive/ql/plan/MapredWork.java  |    11 +-
 .../ql/plan/OperatorExplainVectorization.java   |    85 -
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |   119 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |    92 +-
 .../apache/hadoop/hive/ql/plan/SelectDesc.java  |    35 -
 .../hive/ql/plan/SparkHashTableSinkDesc.java    |    25 -
 .../apache/hadoop/hive/ql/plan/SparkWork.java   |     6 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |    26 -
 .../org/apache/hadoop/hive/ql/plan/TezWork.java |    11 +-
 .../hive/ql/plan/VectorAppMasterEventDesc.java  |    35 -
 .../apache/hadoop/hive/ql/plan/VectorDesc.java  |     5 -
 .../hadoop/hive/ql/plan/VectorFileSinkDesc.java |    35 -
 .../hadoop/hive/ql/plan/VectorFilterDesc.java   |    48 -
 .../hadoop/hive/ql/plan/VectorGroupByDesc.java  |    31 -
 .../hadoop/hive/ql/plan/VectorLimitDesc.java    |    35 -
 .../hadoop/hive/ql/plan/VectorMapJoinDesc.java  |   110 -
 .../hadoop/hive/ql/plan/VectorMapJoinInfo.java  |   169 -
 .../hive/ql/plan/VectorReduceSinkDesc.java      |    68 -
 .../hadoop/hive/ql/plan/VectorSMBJoinDesc.java  |    35 -
 .../hadoop/hive/ql/plan/VectorSelectDesc.java   |    56 -
 .../ql/plan/VectorSparkHashTableSinkDesc.java   |    35 -
 .../hive/ql/plan/VectorTableScanDesc.java       |    45 -
 .../hive/ql/plan/VectorizationCondition.java    |    76 -
 .../exec/vector/TestVectorFilterOperator.java   |    15 +-
 .../exec/vector/TestVectorGroupByOperator.java  |    90 +-
 .../exec/vector/TestVectorSelectOperator.java   |    16 -
 .../ql/optimizer/physical/TestVectorizer.java   |    16 +-
 .../clientpositive/schema_evol_orc_vec_part.q   |    20 +-
 .../schema_evol_orc_vec_part_all_complex.q      |     8 +-
 .../schema_evol_orc_vec_part_all_primitive.q    |    12 +-
 .../clientpositive/schema_evol_orc_vec_table.q  |    12 +-
 .../clientpositive/schema_evol_text_vec_part.q  |    20 +-
 .../schema_evol_text_vec_part_all_complex.q     |     8 +-
 .../schema_evol_text_vec_part_all_primitive.q   |    12 +-
 .../clientpositive/schema_evol_text_vec_table.q |    12 +-
 .../schema_evol_text_vecrow_part.q              |    20 +-
 .../schema_evol_text_vecrow_part_all_complex.q  |     8 +-
 ...schema_evol_text_vecrow_part_all_primitive.q |    12 +-
 .../schema_evol_text_vecrow_table.q             |    12 +-
 .../clientpositive/vector_adaptor_usage_mode.q  |    24 +-
 .../queries/clientpositive/vector_aggregate_9.q |     3 +-
 .../vector_aggregate_without_gby.q              |     4 +-
 .../clientpositive/vector_auto_smb_mapjoin_14.q |    30 +-
 .../clientpositive/vector_between_columns.q     |     4 +-
 .../queries/clientpositive/vector_between_in.q  |    25 +-
 .../clientpositive/vector_binary_join_groupby.q |     7 +-
 .../test/queries/clientpositive/vector_bround.q |     4 +-
 .../test/queries/clientpositive/vector_bucket.q |     3 +-
 .../clientpositive/vector_cast_constant.q       |     4 +-
 .../test/queries/clientpositive/vector_char_2.q |     6 +-
 .../test/queries/clientpositive/vector_char_4.q |     3 +-
 .../queries/clientpositive/vector_char_cast.q   |     2 -
 .../clientpositive/vector_char_mapjoin1.q       |     6 +-
 .../queries/clientpositive/vector_char_simple.q |     8 +-
 .../queries/clientpositive/vector_coalesce.q    |    13 +-
 .../queries/clientpositive/vector_coalesce_2.q  |     8 +-
 .../queries/clientpositive/vector_complex_all.q |     6 +-
 .../clientpositive/vector_complex_join.q        |     4 +-
 .../test/queries/clientpositive/vector_count.q  |     8 +-
 .../clientpositive/vector_count_distinct.q      |     3 +-
 .../queries/clientpositive/vector_data_types.q  |     5 +-
 .../test/queries/clientpositive/vector_date_1.q |     3 +-
 .../queries/clientpositive/vector_decimal_1.q   |     2 +-
 .../clientpositive/vector_decimal_10_0.q        |     2 +-
 .../queries/clientpositive/vector_decimal_2.q   |     2 +-
 .../queries/clientpositive/vector_decimal_3.q   |     2 +-
 .../queries/clientpositive/vector_decimal_4.q   |     2 +-
 .../queries/clientpositive/vector_decimal_5.q   |     2 +-
 .../queries/clientpositive/vector_decimal_6.q   |     2 +-
 .../clientpositive/vector_decimal_aggregate.q   |     6 +-
 .../clientpositive/vector_decimal_cast.q        |     3 +-
 .../clientpositive/vector_decimal_expressions.q |     3 +-
 .../clientpositive/vector_decimal_mapjoin.q     |     3 +-
 .../clientpositive/vector_decimal_math_funcs.q  |     4 +-
 .../clientpositive/vector_decimal_precision.q   |     4 +-
 .../clientpositive/vector_decimal_round.q       |    14 +-
 .../clientpositive/vector_decimal_round_2.q     |    10 +-
 .../clientpositive/vector_decimal_trailing.q    |     2 +-
 .../clientpositive/vector_decimal_udf2.q        |     6 +-
 .../queries/clientpositive/vector_distinct_2.q  |     3 +-
 ql/src/test/queries/clientpositive/vector_elt.q |     5 +-
 .../queries/clientpositive/vector_empty_where.q |     8 +-
 .../queries/clientpositive/vector_groupby4.q    |     2 +-
 .../queries/clientpositive/vector_groupby6.q    |     2 +-
 .../queries/clientpositive/vector_groupby_3.q   |     3 +-
 .../clientpositive/vector_groupby_mapjoin.q     |     4 +-
 .../clientpositive/vector_groupby_reduce.q      |     9 +-
 .../clientpositive/vector_grouping_sets.q       |     4 +-
 .../queries/clientpositive/vector_if_expr.q     |     4 +-
 .../clientpositive/vector_include_no_sel.q      |     3 +-
 .../queries/clientpositive/vector_inner_join.q  |    19 +-
 .../queries/clientpositive/vector_interval_1.q  |    19 +-
 .../queries/clientpositive/vector_interval_2.q  |    22 +-
 .../clientpositive/vector_interval_arithmetic.q |    16 +-
 .../clientpositive/vector_interval_mapjoin.q    |     3 +-
 .../test/queries/clientpositive/vector_join.q   |     1 -
 .../test/queries/clientpositive/vector_join30.q |    16 +-
 .../clientpositive/vector_join_part_col_char.q  |     3 +-
 .../clientpositive/vector_left_outer_join.q     |     4 +-
 .../clientpositive/vector_left_outer_join2.q    |    12 +-
 .../clientpositive/vector_leftsemi_mapjoin.q    |   361 +-
 .../clientpositive/vector_mapjoin_reduce.q      |     5 +-
 .../vector_mr_diff_schema_alias.q               |     3 +-
 .../clientpositive/vector_multi_insert.q        |     4 +-
 .../vector_non_constant_in_expr.q               |     2 +-
 .../vector_non_string_partition.q               |     5 +-
 .../clientpositive/vector_null_projection.q     |     4 +-
 .../clientpositive/vector_nullsafe_join.q       |    21 +-
 .../vector_number_compare_projection.q          |     4 +-
 ql/src/test/queries/clientpositive/vector_nvl.q |     9 +-
 .../queries/clientpositive/vector_orderby_5.q   |     3 +-
 .../queries/clientpositive/vector_outer_join0.q |     5 +-
 .../queries/clientpositive/vector_outer_join1.q |     7 +-
 .../queries/clientpositive/vector_outer_join2.q |     3 +-
 .../queries/clientpositive/vector_outer_join3.q |     7 +-
 .../queries/clientpositive/vector_outer_join4.q |     7 +-
 .../queries/clientpositive/vector_outer_join5.q |    21 +-
 .../queries/clientpositive/vector_outer_join6.q |     5 +-
 .../vector_partition_diff_num_cols.q            |    12 +-
 .../vector_partitioned_date_time.q              |    18 +-
 .../vector_partitioned_date_time_win.q          |    16 +-
 .../queries/clientpositive/vector_reduce1.q     |     3 +-
 .../queries/clientpositive/vector_reduce2.q     |     3 +-
 .../queries/clientpositive/vector_reduce3.q     |     3 +-
 .../vector_reduce_groupby_decimal.q             |     4 +-
 .../clientpositive/vector_string_concat.q       |     5 +-
 .../clientpositive/vector_string_decimal.q      |     2 +-
 .../queries/clientpositive/vector_struct_in.q   |    19 +-
 .../clientpositive/vector_tablesample_rows.q    |     8 +-
 .../test/queries/clientpositive/vector_udf2.q   |     2 +-
 .../test/queries/clientpositive/vector_udf3.q   |     3 +-
 .../queries/clientpositive/vector_varchar_4.q   |     3 +-
 .../clientpositive/vector_varchar_mapjoin1.q    |     6 +-
 .../clientpositive/vector_varchar_simple.q      |     8 +-
 .../clientpositive/vector_when_case_null.q      |     2 +-
 .../queries/clientpositive/vectorization_0.q    |    21 +-
 .../queries/clientpositive/vectorization_1.q    |     1 -
 .../queries/clientpositive/vectorization_10.q   |     1 -
 .../queries/clientpositive/vectorization_11.q   |     1 -
 .../queries/clientpositive/vectorization_12.q   |     1 -
 .../queries/clientpositive/vectorization_13.q   |     6 +-
 .../queries/clientpositive/vectorization_14.q   |     3 +-
 .../queries/clientpositive/vectorization_15.q   |     3 +-
 .../queries/clientpositive/vectorization_16.q   |     3 +-
 .../queries/clientpositive/vectorization_17.q   |     3 +-
 .../queries/clientpositive/vectorization_2.q    |     1 -
 .../queries/clientpositive/vectorization_3.q    |     1 -
 .../queries/clientpositive/vectorization_4.q    |     1 -
 .../queries/clientpositive/vectorization_5.q    |     1 -
 .../queries/clientpositive/vectorization_6.q    |     1 -
 .../queries/clientpositive/vectorization_7.q    |     6 +-
 .../queries/clientpositive/vectorization_8.q    |     6 +-
 .../queries/clientpositive/vectorization_9.q    |     3 +-
 .../clientpositive/vectorization_decimal_date.q |     4 +-
 .../queries/clientpositive/vectorization_div0.q |     7 +-
 .../clientpositive/vectorization_limit.q        |    16 +-
 .../clientpositive/vectorization_nested_udf.q   |     2 -
 .../queries/clientpositive/vectorization_not.q  |     2 -
 .../clientpositive/vectorization_offset_limit.q |     5 +-
 .../queries/clientpositive/vectorization_part.q |     2 -
 .../clientpositive/vectorization_part_project.q |     4 +-
 .../clientpositive/vectorization_part_varchar.q |     2 -
 .../clientpositive/vectorization_pushdown.q     |     4 +-
 .../vectorization_short_regress.q               |    54 +-
 .../clientpositive/vectorized_bucketmapjoin1.q  |     8 +-
 .../queries/clientpositive/vectorized_case.q    |     4 +-
 .../queries/clientpositive/vectorized_casts.q   |     2 +-
 .../queries/clientpositive/vectorized_context.q |     4 +-
 .../clientpositive/vectorized_date_funcs.q      |    11 +-
 .../clientpositive/vectorized_distinct_gby.q    |     5 +-
 .../vectorized_dynamic_partition_pruning.q      |    79 +-
 .../queries/clientpositive/vectorized_mapjoin.q |     3 +-
 .../clientpositive/vectorized_mapjoin2.q        |     2 +-
 .../clientpositive/vectorized_math_funcs.q      |     3 +-
 .../clientpositive/vectorized_nested_mapjoin.q  |     3 +-
 .../queries/clientpositive/vectorized_parquet.q |     4 +-
 .../clientpositive/vectorized_parquet_types.q   |     6 +-
 .../queries/clientpositive/vectorized_ptf.q     |    47 +-
 .../clientpositive/vectorized_shufflejoin.q     |     3 +-
 .../clientpositive/vectorized_string_funcs.q    |     3 +-
 .../clientpositive/vectorized_timestamp.q       |     8 +-
 .../clientpositive/vectorized_timestamp_funcs.q |    15 +-
 .../vectorized_timestamp_ints_casts.q           |     5 +-
 .../llap/schema_evol_orc_vec_part.q.out         |   666 +-
 .../schema_evol_orc_vec_part_all_complex.q.out  |   168 +-
 ...schema_evol_orc_vec_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_orc_vec_table.q.out        |   365 +-
 .../llap/schema_evol_text_vec_part.q.out        |   666 +-
 .../schema_evol_text_vec_part_all_complex.q.out |   168 +-
 ...chema_evol_text_vec_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_text_vec_table.q.out       |   365 +-
 .../llap/schema_evol_text_vecrow_part.q.out     |   666 +-
 ...hema_evol_text_vecrow_part_all_complex.q.out |   168 +-
 ...ma_evol_text_vecrow_part_all_primitive.q.out |   370 +-
 .../llap/schema_evol_text_vecrow_table.q.out    |   365 +-
 .../llap/vector_aggregate_9.q.out               |    35 +-
 .../llap/vector_aggregate_without_gby.q.out     |     4 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |  1973 +--
 .../llap/vector_between_columns.q.out           |   115 +-
 .../clientpositive/llap/vector_between_in.q.out |   600 +-
 .../llap/vector_binary_join_groupby.q.out       |   152 +-
 .../clientpositive/llap/vector_bround.q.out     |    15 +-
 .../clientpositive/llap/vector_bucket.q.out     |    27 +-
 .../llap/vector_cast_constant.q.out             |    53 +-
 .../clientpositive/llap/vector_char_2.q.out     |   144 +-
 .../clientpositive/llap/vector_char_4.q.out     |    27 +-
 .../llap/vector_char_mapjoin1.q.out             |   220 +-
 .../llap/vector_char_simple.q.out               |   209 +-
 .../clientpositive/llap/vector_coalesce.q.out   |   473 +-
 .../clientpositive/llap/vector_coalesce_2.q.out |   100 +-
 .../llap/vector_complex_all.q.out               |   106 +-
 .../llap/vector_complex_join.q.out              |    40 +-
 .../clientpositive/llap/vector_count.q.out      |   146 +-
 .../llap/vector_count_distinct.q.out            |    73 +-
 .../clientpositive/llap/vector_data_types.q.out |    53 +-
 .../llap/vector_decimal_aggregate.q.out         |    95 +-
 .../llap/vector_decimal_cast.q.out              |    77 +-
 .../llap/vector_decimal_expressions.q.out       |    50 +-
 .../llap/vector_decimal_mapjoin.q.out           |    59 +-
 .../llap/vector_decimal_math_funcs.q.out        |    69 +-
 .../llap/vector_decimal_precision.q.out         |    35 +-
 .../llap/vector_decimal_round.q.out             |   189 +-
 .../llap/vector_decimal_round_2.q.out           |   173 +-
 .../llap/vector_decimal_udf2.q.out              |    62 +-
 .../clientpositive/llap/vector_distinct_2.q.out |    53 +-
 .../clientpositive/llap/vector_elt.q.out        |   145 +-
 .../clientpositive/llap/vector_groupby4.q.out   |    62 +-
 .../clientpositive/llap/vector_groupby6.q.out   |    62 +-
 .../clientpositive/llap/vector_groupby_3.q.out  |    55 +-
 .../llap/vector_groupby_mapjoin.q.out           |   238 +-
 .../llap/vector_groupby_reduce.q.out            |   286 +-
 .../llap/vector_grouping_sets.q.out             |    53 +-
 .../clientpositive/llap/vector_if_expr.q.out    |    47 +-
 .../llap/vector_include_no_sel.q.out            |    75 +-
 .../clientpositive/llap/vector_inner_join.q.out |   686 +-
 .../clientpositive/llap/vector_interval_1.q.out |   347 +-
 .../clientpositive/llap/vector_interval_2.q.out |   448 +-
 .../llap/vector_interval_arithmetic.q.out       |   334 +-
 .../llap/vector_interval_mapjoin.q.out          |    63 +-
 .../clientpositive/llap/vector_join30.q.out     |   907 +-
 .../llap/vector_join_part_col_char.q.out        |     4 +-
 .../llap/vector_left_outer_join.q.out           |    39 +-
 .../llap/vector_left_outer_join2.q.out          |   230 +-
 .../llap/vector_leftsemi_mapjoin.q.out          | 11096 +++++------------
 .../llap/vector_mapjoin_reduce.q.out            |    88 +-
 .../llap/vector_mr_diff_schema_alias.q.out      |    44 +-
 .../llap/vector_multi_insert.q.out              |    16 +-
 .../llap/vector_null_projection.q.out           |    39 +-
 .../llap/vector_nullsafe_join.q.out             |   628 +-
 .../llap/vector_number_compare_projection.q.out |    68 +-
 .../clientpositive/llap/vector_nvl.q.out        |   265 +-
 .../clientpositive/llap/vector_orderby_5.q.out  |    67 +-
 .../llap/vector_outer_join0.q.out               |   132 +-
 .../llap/vector_outer_join1.q.out               |   252 +-
 .../llap/vector_outer_join2.q.out               |   123 +-
 .../llap/vector_outer_join3.q.out               |   327 +-
 .../llap/vector_outer_join4.q.out               |   242 +-
 .../llap/vector_outer_join5.q.out               |   914 +-
 .../llap/vector_outer_join6.q.out               |   199 +-
 .../llap/vector_partition_diff_num_cols.q.out   |   250 +-
 .../llap/vector_partitioned_date_time.q.out     |  1075 +-
 .../clientpositive/llap/vector_reduce1.q.out    |    42 +-
 .../clientpositive/llap/vector_reduce2.q.out    |    42 +-
 .../clientpositive/llap/vector_reduce3.q.out    |    42 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |    71 +-
 .../llap/vector_string_concat.q.out             |   138 +-
 .../clientpositive/llap/vector_struct_in.q.out  |   568 +-
 .../clientpositive/llap/vector_varchar_4.q.out  |    27 +-
 .../llap/vector_varchar_mapjoin1.q.out          |    93 +-
 .../llap/vector_varchar_simple.q.out            |    99 +-
 .../llap/vector_when_case_null.q.out            |    52 +-
 .../clientpositive/llap/vectorization_0.q.out   |   519 +-
 .../clientpositive/llap/vectorization_13.q.out  |   114 +-
 .../clientpositive/llap/vectorization_14.q.out  |    28 +-
 .../clientpositive/llap/vectorization_15.q.out  |    28 +-
 .../clientpositive/llap/vectorization_16.q.out  |    21 +-
 .../clientpositive/llap/vectorization_17.q.out  |    23 +-
 .../clientpositive/llap/vectorization_7.q.out   |   100 +-
 .../clientpositive/llap/vectorization_8.q.out   |   100 +-
 .../clientpositive/llap/vectorization_9.q.out   |    21 +-
 .../llap/vectorization_decimal_date.q.out       |    76 +-
 .../llap/vectorization_part_project.q.out       |    23 +-
 .../llap/vectorization_pushdown.q.out           |    21 +-
 .../llap/vectorization_short_regress.q.out      |  1002 +-
 .../llap/vectorized_bucketmapjoin1.q.out        |   108 +-
 .../clientpositive/llap/vectorized_case.q.out   |    62 +-
 .../clientpositive/llap/vectorized_casts.q.out  |    16 +-
 .../llap/vectorized_context.q.out               |    32 +-
 .../llap/vectorized_date_funcs.q.out            |   314 +-
 .../llap/vectorized_distinct_gby.q.out          |    51 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  1159 +-
 .../llap/vectorized_mapjoin.q.out               |    71 +-
 .../llap/vectorized_math_funcs.q.out            |    69 +-
 .../llap/vectorized_nested_mapjoin.q.out        |    39 +-
 .../llap/vectorized_parquet.q.out               |    25 +-
 .../llap/vectorized_parquet_types.q.out         |    12 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |   668 +-
 .../llap/vectorized_shufflejoin.q.out           |    73 +-
 .../llap/vectorized_string_funcs.q.out          |    54 +-
 .../llap/vectorized_timestamp.q.out             |    16 +-
 .../llap/vectorized_timestamp_funcs.q.out       |   292 +-
 .../llap/vectorized_timestamp_ints_casts.q.out  |   138 +-
 .../spark/vector_between_in.q.out               |   604 +-
 .../spark/vector_cast_constant.q.out            |    53 +-
 .../clientpositive/spark/vector_char_4.q.out    |    27 +-
 .../spark/vector_count_distinct.q.out           |    74 +-
 .../spark/vector_data_types.q.out               |    53 +-
 .../spark/vector_decimal_aggregate.q.out        |    96 +-
 .../spark/vector_decimal_mapjoin.q.out          |    58 +-
 .../spark/vector_distinct_2.q.out               |    54 +-
 .../clientpositive/spark/vector_elt.q.out       |   143 +-
 .../clientpositive/spark/vector_groupby_3.q.out |    56 +-
 .../spark/vector_inner_join.q.out               |   678 +-
 .../spark/vector_left_outer_join.q.out          |    39 +-
 .../spark/vector_mapjoin_reduce.q.out           |    72 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    68 +-
 .../spark/vector_outer_join0.q.out              |   130 +-
 .../spark/vector_outer_join1.q.out              |   248 +-
 .../spark/vector_outer_join2.q.out              |   121 +-
 .../spark/vector_outer_join3.q.out              |   342 +-
 .../spark/vector_outer_join4.q.out              |   254 +-
 .../spark/vector_outer_join5.q.out              |   968 +-
 .../spark/vector_string_concat.q.out            |   137 +-
 .../clientpositive/spark/vector_varchar_4.q.out |    27 +-
 .../clientpositive/spark/vectorization_0.q.out  |   519 +-
 .../clientpositive/spark/vectorization_13.q.out |   114 +-
 .../clientpositive/spark/vectorization_14.q.out |    28 +-
 .../clientpositive/spark/vectorization_15.q.out |    28 +-
 .../clientpositive/spark/vectorization_16.q.out |    21 +-
 .../clientpositive/spark/vectorization_17.q.out |    23 +-
 .../clientpositive/spark/vectorization_7.q.out  |   118 +-
 .../clientpositive/spark/vectorization_8.q.out  |   146 +-
 .../clientpositive/spark/vectorization_9.q.out  |    21 +-
 .../spark/vectorization_decimal_date.q.out      |    75 +-
 .../spark/vectorization_div0.q.out              |   167 +-
 .../spark/vectorization_part_project.q.out      |    23 +-
 .../spark/vectorization_pushdown.q.out          |    21 +-
 .../spark/vectorization_short_regress.q.out     |  1002 +-
 .../spark/vectorized_bucketmapjoin1.q.out       |   135 +-
 .../clientpositive/spark/vectorized_case.q.out  |    62 +-
 .../spark/vectorized_mapjoin.q.out              |    70 +-
 .../spark/vectorized_math_funcs.q.out           |    68 +-
 .../spark/vectorized_nested_mapjoin.q.out       |    39 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |   693 +-
 .../spark/vectorized_shufflejoin.q.out          |    80 +-
 .../spark/vectorized_string_funcs.q.out         |    53 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   292 +-
 .../tez/vector_join_part_col_char.q.out         |     4 +-
 .../tez/vector_non_string_partition.q.out       |    98 +-
 .../clientpositive/tez/vectorization_div0.q.out |   167 +-
 .../tez/vectorization_limit.q.out               |   339 +-
 .../vector_adaptor_usage_mode.q.out             |   230 +-
 .../clientpositive/vector_aggregate_9.q.out     |    34 +-
 .../vector_aggregate_without_gby.q.out          |    20 +-
 .../vector_auto_smb_mapjoin_14.q.out            |   604 +-
 .../clientpositive/vector_between_columns.q.out |    75 +-
 .../clientpositive/vector_between_in.q.out      |   466 +-
 .../vector_binary_join_groupby.q.out            |    96 +-
 .../results/clientpositive/vector_bround.q.out  |    16 +-
 .../results/clientpositive/vector_bucket.q.out  |    16 +-
 .../clientpositive/vector_cast_constant.q.out   |    43 +-
 .../results/clientpositive/vector_char_2.q.out  |    96 +-
 .../results/clientpositive/vector_char_4.q.out  |    27 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |   123 +-
 .../clientpositive/vector_char_simple.q.out     |   180 +-
 .../clientpositive/vector_coalesce.q.out        |   394 +-
 .../clientpositive/vector_coalesce_2.q.out      |    83 +-
 .../clientpositive/vector_complex_all.q.out     |    84 +-
 .../clientpositive/vector_complex_join.q.out    |    28 +-
 .../results/clientpositive/vector_count.q.out   |   142 +-
 .../clientpositive/vector_count_distinct.q.out  |    39 +-
 .../clientpositive/vector_data_types.q.out      |    40 +-
 .../vector_decimal_aggregate.q.out              |    74 +-
 .../clientpositive/vector_decimal_cast.q.out    |    34 +-
 .../vector_decimal_expressions.q.out            |    37 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |    36 +-
 .../vector_decimal_math_funcs.q.out             |    31 +-
 .../vector_decimal_precision.q.out              |    34 +-
 .../clientpositive/vector_decimal_round.q.out   |   129 +-
 .../clientpositive/vector_decimal_round_2.q.out |   132 +-
 .../clientpositive/vector_decimal_udf2.q.out    |    62 +-
 .../clientpositive/vector_distinct_2.q.out      |    38 +-
 .../results/clientpositive/vector_elt.q.out     |   101 +-
 .../clientpositive/vector_empty_where.q.out     |   168 +-
 .../clientpositive/vector_groupby4.q.out        |    41 +-
 .../clientpositive/vector_groupby6.q.out        |    41 +-
 .../clientpositive/vector_groupby_3.q.out       |    39 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |    24 +-
 .../clientpositive/vector_groupby_reduce.q.out  |   178 +-
 .../clientpositive/vector_grouping_sets.q.out   |    36 +-
 .../results/clientpositive/vector_if_expr.q.out |    37 +-
 .../clientpositive/vector_include_no_sel.q.out  |    47 +-
 .../clientpositive/vector_inner_join.q.out      |   396 +-
 .../clientpositive/vector_interval_1.q.out      |   264 +-
 .../clientpositive/vector_interval_2.q.out      |   348 +-
 .../vector_interval_arithmetic.q.out            |   260 +-
 .../vector_interval_mapjoin.q.out               |    40 +-
 .../results/clientpositive/vector_join30.q.out  |   700 +-
 .../vector_join_part_col_char.q.out             |    15 +-
 .../clientpositive/vector_left_outer_join.q.out |    20 +-
 .../vector_left_outer_join2.q.out               |   156 +-
 .../vector_leftsemi_mapjoin.q.out               |  6989 +++++------
 .../clientpositive/vector_mapjoin_reduce.q.out  |    56 +-
 .../vector_mr_diff_schema_alias.q.out           |    38 +-
 .../clientpositive/vector_multi_insert.q.out    |    16 +-
 .../vector_non_constant_in_expr.q.out           |    14 +-
 .../vector_non_string_partition.q.out           |    72 +-
 .../clientpositive/vector_null_projection.q.out |    29 +-
 .../clientpositive/vector_nullsafe_join.q.out   |   326 +-
 .../vector_number_compare_projection.q.out      |    66 +-
 .../results/clientpositive/vector_nvl.q.out     |   181 +-
 .../clientpositive/vector_orderby_5.q.out       |    47 +-
 .../clientpositive/vector_outer_join0.q.out     |    74 +-
 .../clientpositive/vector_outer_join1.q.out     |   126 +-
 .../clientpositive/vector_outer_join2.q.out     |    53 +-
 .../clientpositive/vector_outer_join3.q.out     |   309 +-
 .../clientpositive/vector_outer_join4.q.out     |   235 +-
 .../clientpositive/vector_outer_join5.q.out     |   882 +-
 .../clientpositive/vector_outer_join6.q.out     |   190 +-
 .../vector_partition_diff_num_cols.q.out        |   190 +-
 .../vector_partitioned_date_time.q.out          |   348 +-
 .../results/clientpositive/vector_reduce1.q.out |    32 +-
 .../results/clientpositive/vector_reduce2.q.out |    32 +-
 .../results/clientpositive/vector_reduce3.q.out |    32 +-
 .../vector_reduce_groupby_decimal.q.out         |    47 +-
 .../clientpositive/vector_string_concat.q.out   |    77 +-
 .../clientpositive/vector_string_decimal.q.out  |    14 +-
 .../clientpositive/vector_struct_in.q.out       |   228 +-
 .../vector_tablesample_rows.q.out               |    88 +-
 .../results/clientpositive/vector_udf2.q.out    |    30 +-
 .../results/clientpositive/vector_udf3.q.out    |    27 +-
 .../clientpositive/vector_varchar_4.q.out       |    27 +-
 .../vector_varchar_mapjoin1.q.out               |    60 +-
 .../clientpositive/vector_varchar_simple.q.out  |    75 +-
 .../clientpositive/vector_when_case_null.q.out  |    40 +-
 .../clientpositive/vectorization_0.q.out        |   398 +-
 .../clientpositive/vectorization_13.q.out       |    94 +-
 .../clientpositive/vectorization_14.q.out       |    28 +-
 .../clientpositive/vectorization_15.q.out       |    28 +-
 .../clientpositive/vectorization_16.q.out       |    20 +-
 .../clientpositive/vectorization_17.q.out       |    20 +-
 .../clientpositive/vectorization_7.q.out        |    74 +-
 .../clientpositive/vectorization_8.q.out        |    74 +-
 .../clientpositive/vectorization_9.q.out        |    20 +-
 .../vectorization_decimal_date.q.out            |    33 +-
 .../clientpositive/vectorization_div0.q.out     |   104 +-
 .../clientpositive/vectorization_limit.q.out    |   224 +-
 .../vectorization_offset_limit.q.out            |    52 +-
 .../vectorization_part_project.q.out            |    20 +-
 .../clientpositive/vectorization_pushdown.q.out |    20 +-
 .../vectorization_short_regress.q.out           |   800 +-
 .../vectorized_bucketmapjoin1.q.out             |    99 +-
 .../clientpositive/vectorized_case.q.out        |    62 +-
 .../clientpositive/vectorized_casts.q.out       |    16 +-
 .../clientpositive/vectorized_context.q.out     |    16 +-
 .../clientpositive/vectorized_date_funcs.q.out  |   157 +-
 .../vectorized_distinct_gby.q.out               |    40 +-
 .../clientpositive/vectorized_mapjoin.q.out     |    48 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |    47 +-
 .../clientpositive/vectorized_math_funcs.q.out  |    31 +-
 .../vectorized_nested_mapjoin.q.out             |    20 +-
 .../clientpositive/vectorized_parquet.q.out     |    28 +-
 .../vectorized_parquet_types.q.out              |    48 +-
 .../results/clientpositive/vectorized_ptf.q.out |   728 +-
 .../clientpositive/vectorized_shufflejoin.q.out |    31 +-
 .../vectorized_string_funcs.q.out               |    16 +-
 .../clientpositive/vectorized_timestamp.q.out   |    80 +-
 .../vectorized_timestamp_funcs.q.out            |   222 +-
 .../vectorized_timestamp_ints_casts.q.out       |    62 +-
 755 files changed, 17856 insertions(+), 56658 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/common/src/java/org/apache/hive/common/util/DateUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/common/util/DateUtils.java b/common/src/java/org/apache/hive/common/util/DateUtils.java
index a1068ec..959a542 100644
--- a/common/src/java/org/apache/hive/common/util/DateUtils.java
+++ b/common/src/java/org/apache/hive/common/util/DateUtils.java
@@ -54,24 +54,4 @@ public class DateUtils {
     }
     return result;
   }
-
-  // From java.util.Calendar
-  private static final String[] FIELD_NAME = {
-    "ERA", "YEAR", "MONTH", "WEEK_OF_YEAR", "WEEK_OF_MONTH", "DAY_OF_MONTH",
-    "DAY_OF_YEAR", "DAY_OF_WEEK", "DAY_OF_WEEK_IN_MONTH", "AM_PM", "HOUR",
-    "HOUR_OF_DAY", "MINUTE", "SECOND", "MILLISECOND", "ZONE_OFFSET",
-    "DST_OFFSET"
-  };
-
-  /**
-   * Returns the name of the specified calendar field.
-   *
-   * @param field the calendar field
-   * @return the calendar field name
-   * @exception IndexOutOfBoundsException if <code>field</code> is negative,
-   * equal to or greater then <code>FIELD_COUNT</code>.
-   */
-  public static String getFieldName(int field) {
-      return FIELD_NAME[field];
-  }
 }
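
For reference, the getFieldName() helper removed above was a thin lookup into the Calendar field-name table shown in the deleted hunk. A minimal sketch of its behavior (illustrative only, not part of this diff):

    import java.util.Calendar;

    // FIELD_NAME mirrors the deleted table above; Calendar.MONTH == 2,
    // so the removed DateUtils.getFieldName(Calendar.MONTH) returned "MONTH".
    String[] fieldName = { "ERA", "YEAR", "MONTH" /* , ... */ };
    System.out.println(fieldName[Calendar.MONTH]);   // prints: MONTH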

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumn.txt
index d8164a4..e52fcc0 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumn.txt
@@ -157,12 +157,7 @@ public class <ClassName> extends VectorExpression {
   public void setOutputColumn(int outputColumn) {
     this.outputColumn = outputColumn;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
+  
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
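
The vectorExpressionParameters() override deleted here (and from the sibling templates below) rendered the operand summary for the vectorization EXPLAIN display that this revert removes. A minimal sketch of what it produced (illustrative only, not part of this diff); note that the stray second '+' in the deleted line is a unary plus applied to the int, so it concatenated normally:

    int colNum1 = 0, colNum2 = 1;    // hypothetical operand columns
    String params = "col " + colNum1 + ", col " + + colNum2;
    System.out.println(params);      // prints: col 0, col 1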

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt
index 31a015f..e1df589 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnDecimal.txt
@@ -167,11 +167,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnWithConvert.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnWithConvert.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnWithConvert.txt
new file mode 100644
index 0000000..bcd10a2
--- /dev/null
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticColumnWithConvert.txt
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
+
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+
+/**
+ * Generated from template ColumnArithmeticColumnWithConvert.txt, which covers binary arithmetic 
+ * expressions between columns.
+ */
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+  
+  private int colNum1;
+  private int colNum2;
+  private int outputColumn;
+
+  public <ClassName>(int colNum1, int colNum2, int outputColumn) {
+    this.colNum1 = colNum1;
+    this.colNum2 = colNum2;
+    this.outputColumn = outputColumn;
+  }
+
+  public <ClassName>() {
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+
+    if (childExpressions != null) {
+      super.evaluateChildren(batch);
+    }
+
+    <InputColumnVectorType1> inputColVector1 = (<InputColumnVectorType1>) batch.cols[colNum1];
+    <InputColumnVectorType2> inputColVector2 = (<InputColumnVectorType2>) batch.cols[colNum2];
+    <OutputColumnVectorType> outputColVector = (<OutputColumnVectorType>) batch.cols[outputColumn];
+    int[] sel = batch.selected;
+    int n = batch.size;
+    <VectorOperandType1>[] vector1 = inputColVector1.vector;
+    <VectorOperandType2>[] vector2 = inputColVector2.vector;
+    <VectorReturnType>[] outputVector = outputColVector.vector;
+    
+    // return immediately if batch is empty
+    if (n == 0) {
+      return;
+    }
+    
+    outputColVector.isRepeating = 
+         inputColVector1.isRepeating && inputColVector2.isRepeating
+      || inputColVector1.isRepeating && !inputColVector1.noNulls && inputColVector1.isNull[0]
+      || inputColVector2.isRepeating && !inputColVector2.noNulls && inputColVector2.isNull[0];
+    
+    // Handle nulls first  
+    NullUtil.propagateNullsColCol(
+      inputColVector1, inputColVector2, outputColVector, sel, n, batch.selectedInUse);
+          
+    /* Disregard nulls for processing. In other words,
+     * the arithmetic operation is performed even if one or 
+     * more inputs are null. This is to improve speed by avoiding
+     * conditional checks in the inner loop.
+     */ 
+    if (inputColVector1.isRepeating && inputColVector2.isRepeating) { 
+      outputVector[0] = <TypeConversion1>(vector1[0]) <OperatorSymbol> <TypeConversion2>(vector2[0]);
+    } else if (inputColVector1.isRepeating) {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = <TypeConversion1>(vector1[0]) <OperatorSymbol> <TypeConversion2>(vector2[i]);
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = <TypeConversion1>(vector1[0]) <OperatorSymbol> <TypeConversion2>(vector2[i]);
+        }
+      }
+    } else if (inputColVector2.isRepeating) {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = <TypeConversion1>(vector1[i]) <OperatorSymbol> <TypeConversion2>(vector2[0]);
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = <TypeConversion1>(vector1[i]) <OperatorSymbol> <TypeConversion2>(vector2[0]);
+        }
+      }
+    } else {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = <TypeConversion1>(vector1[i]) <OperatorSymbol> <TypeConversion2>(vector2[i]);
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = <TypeConversion1>(vector1[i]) <OperatorSymbol>  <TypeConversion2>(vector2[i]);
+        }
+      }
+    }
+    
+    /* For the case when the output can have null values, follow 
+     * the convention that the data values must be 1 for long and 
+     * NaN for double. This is to prevent possible later zero-divide errors
+     * in complex arithmetic expressions like col2 / (col1 - 1)
+     * in the case when some col1 entries are null.
+     */
+    NullUtil.setNullDataEntries<CamelReturnType>(outputColVector, batch.selectedInUse, sel, n);
+  }
+
+  @Override
+  public int getOutputColumn() {
+    return outputColumn;
+  }
+
+  @Override
+  public String getOutputType() {
+    return "<VectorReturnType>";
+  }
+  
+  public int getColNum1() {
+    return colNum1;
+  }
+
+  public void setColNum1(int colNum1) {
+    this.colNum1 = colNum1;
+  }
+
+  public int getColNum2() {
+    return colNum2;
+  }
+
+  public void setColNum2(int colNum2) {
+    this.colNum2 = colNum2;
+  }
+
+  public void setOutputColumn(int outputColumn) {
+    this.outputColumn = outputColumn;
+  }
+  
+  @Override
+  public VectorExpressionDescriptor.Descriptor getDescriptor() {
+    return (new VectorExpressionDescriptor.Builder())
+        .setMode(
+            VectorExpressionDescriptor.Mode.PROJECTION)
+        .setNumArguments(2)
+        .setArgumentTypes(
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+        .setInputExpressionTypes(
+            VectorExpressionDescriptor.InputExpressionType.COLUMN,
+            VectorExpressionDescriptor.InputExpressionType.COLUMN).build();
+  }
+}
+
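
The closing comment of the new template describes a null-placeholder convention; a minimal sketch of what NullUtil.setNullDataEntriesLong presumably does for a long output column (illustrative only, not part of this diff):

    // After the unconditional arithmetic loops, rows flagged null still hold
    // arbitrary results; overwrite them with the convention value (1 for longs,
    // Double.NaN for doubles) so expressions such as col2 / (col1 - 1) stay
    // well-defined downstream even on null rows.
    for (int j = 0; j != n; j++) {
      int i = batch.selectedInUse ? sel[j] : j;
      if (!outputColVector.noNulls && outputColVector.isNull[i]) {
        outputVector[i] = 1L;
      }
    }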

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalar.txt
index 2cc1aa2..87335f1 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalar.txt
@@ -134,11 +134,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt
index 294bb4f..0bb1532 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarDecimal.txt
@@ -132,11 +132,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarWithConvert.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarWithConvert.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarWithConvert.txt
new file mode 100644
index 0000000..105eb92
--- /dev/null
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnArithmeticScalarWithConvert.txt
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
+
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.<InputColumnVectorType>;
+import org.apache.hadoop.hive.ql.exec.vector.<OutputColumnVectorType>;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+
+/**
+ * Generated from template ColumnArithmeticScalarWithConvert.txt, which covers binary arithmetic 
+ * expressions between a column and a scalar.
+ */
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private int colNum;
+  private <VectorOperandType2> value;
+  private int outputColumn;
+
+  public <ClassName>(int colNum, <VectorOperandType2> value, int outputColumn) {
+    this.colNum = colNum;
+    this.value = <TypeConversion2>(value);
+    this.outputColumn = outputColumn;
+  }
+
+  public <ClassName>() {
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+
+    if (childExpressions != null) {
+      super.evaluateChildren(batch);
+    }
+
+    <InputColumnVectorType> inputColVector = (<InputColumnVectorType>) batch.cols[colNum];
+    <OutputColumnVectorType> outputColVector = (<OutputColumnVectorType>) batch.cols[outputColumn];
+    int[] sel = batch.selected;
+    boolean[] inputIsNull = inputColVector.isNull;
+    boolean[] outputIsNull = outputColVector.isNull;
+    outputColVector.noNulls = inputColVector.noNulls;
+    outputColVector.isRepeating = inputColVector.isRepeating;
+    int n = batch.size;
+    <VectorOperandType1>[] vector = inputColVector.vector;
+    <VectorReturnType>[] outputVector = outputColVector.vector;
+    
+    // return immediately if batch is empty
+    if (n == 0) {
+      return;
+    }
+
+    if (inputColVector.isRepeating) {
+      outputVector[0] = <TypeConversion1>(vector[0]) <OperatorSymbol> value;
+      
+      // Even if there are no nulls, we always copy over entry 0. Simplifies code.
+      outputIsNull[0] = inputIsNull[0]; 
+    } else if (inputColVector.noNulls) {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = <TypeConversion1>(vector[i]) <OperatorSymbol> value;
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = <TypeConversion1>(vector[i]) <OperatorSymbol> value;
+        }
+      }
+    } else /* there are nulls */ {
+      if (batch.selectedInUse) {
+        for(int j = 0; j != n; j++) {
+          int i = sel[j];
+          outputVector[i] = <TypeConversion1>(vector[i]) <OperatorSymbol> value;
+          outputIsNull[i] = inputIsNull[i];
+        }
+      } else {
+        for(int i = 0; i != n; i++) {
+          outputVector[i] = <TypeConversion1>(vector[i]) <OperatorSymbol> value;
+        }
+        System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
+      }
+    }
+    
+    NullUtil.setNullOutputEntriesColScalar(outputColVector, batch.selectedInUse, sel, n);
+  }
+
+  @Override
+  public int getOutputColumn() {
+    return outputColumn;
+  }
+  
+  @Override
+  public String getOutputType() {
+    return "<VectorReturnType>";
+  }
+  
+  public int getColNum() {
+    return colNum;
+  }
+  
+  public void setColNum(int colNum) {
+    this.colNum = colNum;
+  }
+
+  public <VectorOperandType2> getValue() {
+    return value;
+  }
+
+  public void setValue(<VectorOperandType2> value) {
+    this.value = value;
+  }
+
+  public void setOutputColumn(int outputColumn) {
+    this.outputColumn = outputColumn;
+  }
+
+  @Override
+  public VectorExpressionDescriptor.Descriptor getDescriptor() {
+    return (new VectorExpressionDescriptor.Builder())
+        .setMode(
+            VectorExpressionDescriptor.Mode.PROJECTION)
+        .setNumArguments(2)
+        .setArgumentTypes(
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType1>"),
+            VectorExpressionDescriptor.ArgumentType.getType("<OperandType2>"))
+        .setInputExpressionTypes(
+            VectorExpressionDescriptor.InputExpressionType.COLUMN,
+            VectorExpressionDescriptor.InputExpressionType.SCALAR).build();
+  }
+}
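
The evaluate() method above follows the three-way split used throughout these templates. A condensed sketch with hypothetical long operands and the '+' operator, ignoring the selectedInUse variants for brevity (illustrative only, not part of this diff):

    if (inputColVector.isRepeating) {
      // one logical value stands for the whole batch: compute slot 0 only
      outputVector[0] = vector[0] + value;
      outputIsNull[0] = inputIsNull[0];
    } else if (inputColVector.noNulls) {
      // fast path: no per-row null bookkeeping needed
      for (int i = 0; i != n; i++) {
        outputVector[i] = vector[i] + value;
      }
    } else {
      // compute every row regardless of nulls, then copy the null flags over
      for (int i = 0; i != n; i++) {
        outputVector[i] = vector[i] + value;
      }
      System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
    }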

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt
index cbc97da..f2b4c81 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareColumn.txt
@@ -157,11 +157,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt
index 6568d1c..2438ee4 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnCompareScalar.txt
@@ -149,11 +149,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumn.txt
index 04b533a..b0f6eb1 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumn.txt
@@ -184,11 +184,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt
index 68c4f58..623bcfb 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideColumnDecimal.txt
@@ -139,11 +139,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalar.txt
index 25e0d85..c6614ab 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalar.txt
@@ -139,11 +139,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt
index 0728f6c..841ef93 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnDivideScalarDecimal.txt
@@ -138,11 +138,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt
index efbf1ba..cf690db 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryFunc.txt
@@ -122,11 +122,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryMinus.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryMinus.txt b/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryMinus.txt
index 6574267..b52b7c7 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryMinus.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/ColumnUnaryMinus.txt
@@ -124,11 +124,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthColumn.txt
index 2a9f947..c3d8d7e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthColumn.txt
@@ -181,11 +181,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthScalar.txt
index 4bbc358..d1474fb 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticIntervalYearMonthScalar.txt
@@ -141,11 +141,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
index 2e66b3a..63cebaf 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
@@ -170,11 +170,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampScalar.txt
index e679449..7aee529 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampScalar.txt
@@ -139,11 +139,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticIntervalYearMonthColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticIntervalYearMonthColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticIntervalYearMonthColumn.txt
index e23dc27..c68ac34 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticIntervalYearMonthColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticIntervalYearMonthColumn.txt
@@ -155,11 +155,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticTimestampColumn.txt
index 85d88fd..cb6b750 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DateScalarArithmeticTimestampColumn.txt
@@ -146,11 +146,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt b/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt
index 0b7fefc..619015e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt
@@ -119,11 +119,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
index 0a9c444..e9aaaf2 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
@@ -173,12 +173,7 @@ public class <ClassName> extends VectorExpression {
   public void setRightValue(<OperandType> value) {
     this.leftValue = value;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", left " + leftValue + ", right " + rightValue;
-  }
-
+  
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
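
Unrelated to this diff, the unchanged context above shows setRightValue(...) assigning this.leftValue, which looks like a copy-paste slip in the template. The setter as it presumably should read (illustrative only, not part of this diff; long stands in for <OperandType>):

    public void setRightValue(long value) {
      this.rightValue = value;
    }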

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
index ee80606..e25b9c2 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
@@ -33,8 +33,8 @@ public class <ClassName> extends VectorExpression {
 
   private static final long serialVersionUID = 1L;
 
-  protected int colNum1;
-  protected int colNum2;
+  private int colNum1;
+  private int colNum2;
 
   public <ClassName>(int colNum1, int colNum2) { 
     this.colNum1 = colNum1;
@@ -182,11 +182,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
index 248a66a..b0f6e5c 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
@@ -32,8 +32,8 @@ public class <ClassName> extends VectorExpression {
 
   private static final long serialVersionUID = 1L;
 
-  protected int colNum;
-  protected <OperandType2> value;
+  private int colNum;
+  private <OperandType2> value;
 
   public <ClassName>(int colNum, <OperandType2> value) { 
     this.colNum = colNum;
@@ -158,11 +158,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

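The files being patched here are code-generation templates: at build time, placeholders such as <ClassName> and <OperandType2> are substituted to emit one concrete class per type/operator pairing. A toy sketch of that substitution step (Hive's real generator is more involved; the names below are illustrative only):

    import java.util.Map;

    public class TemplateExpandSketch {
      // Replace <Placeholder> tokens in a template with concrete values.
      static String expand(String template, Map<String, String> bindings) {
        String out = template;
        for (Map.Entry<String, String> e : bindings.entrySet()) {
          out = out.replace("<" + e.getKey() + ">", e.getValue());
        }
        return out;
      }

      public static void main(String[] args) {
        String template =
            "public class <ClassName> extends VectorExpression {\n"
          + "  private int colNum;\n"
          + "  private <OperandType2> value;\n"
          + "}\n";
        System.out.println(expand(template,
            Map.of("ClassName", "FilterLongColGreaterLongScalar",
                   "OperandType2", "long")));
      }
    }
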
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
index 4cef036..d68edfa 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
@@ -155,11 +155,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", left " + leftValue.toString() + ", right " + rightValue.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
index ee450d3..a2352c6 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
@@ -430,11 +430,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

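Each hunk keeps getDescriptor(), whose body builds a descriptor fluently via (new VectorExpressionDescriptor.Builder()).setMode(...). The chain is truncated in these excerpts, so the following is only a toy builder in the same fluent style, with invented field and method names rather than the real VectorExpressionDescriptor API:

    public class DescriptorBuilderSketch {
      enum Mode { FILTER, PROJECTION }   // illustrative modes

      static final class Descriptor {
        final Mode mode;
        final int argCount;
        Descriptor(Mode mode, int argCount) { this.mode = mode; this.argCount = argCount; }
        @Override public String toString() { return mode + "/" + argCount + " args"; }
      }

      static final class Builder {
        private Mode mode;
        private int argCount;
        Builder setMode(Mode mode) { this.mode = mode; return this; }       // fluent setters
        Builder setNumArguments(int n) { this.argCount = n; return this; }
        Descriptor build() { return new Descriptor(mode, argCount); }
      }

      public static void main(String[] args) {
        System.out.println(new Builder().setMode(Mode.FILTER).setNumArguments(2).build());
      }
    }
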
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
index 9943f45..bdd39b9 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
@@ -145,11 +145,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
index 4477aff..0608016 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
@@ -145,11 +145,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value.toString() + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
index 610c062..57caf7e 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
@@ -170,11 +170,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
index 73c46a1..f5f59c2 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
@@ -154,11 +154,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
index 037382c..b7544c7 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
@@ -32,8 +32,8 @@ public class <ClassName> extends VectorExpression {
 
   private static final long serialVersionUID = 1L;
 
-  protected int colNum;
-  protected <OperandType1> value;
+  private int colNum;
+  private <OperandType1> value;
 
   public <ClassName>(<OperandType1> value, int colNum) { 
     this.colNum = colNum;
@@ -158,11 +158,6 @@ public class <ClassName> extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt
index 6cbfca1..e8049da 100644
--- a/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt
+++ b/ql/src/gen/vectorization/ExpressionTemplates/FilterStringColumnBetween.txt
@@ -18,8 +18,6 @@
  
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -178,12 +176,7 @@ public class <ClassName> extends VectorExpression {
   public void setRight(byte[] value) {
     this.right = value;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", left " + new String(left, StandardCharsets.UTF_8) + ", right " + new String(right, StandardCharsets.UTF_8);
-  }
-
+
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

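This string variant also drops the StandardCharsets import, since only the removed method decoded the byte[] bounds for display. A small self-contained sketch of that decoding step:

    import java.nio.charset.StandardCharsets;

    public class BytesBoundsSketch {
      public static void main(String[] args) {
        byte[] left = "aaa".getBytes(StandardCharsets.UTF_8);
        byte[] right = "zzz".getBytes(StandardCharsets.UTF_8);
        // Same rendering the removed method used for EXPLAIN output.
        System.out.println("col 0, left " + new String(left, StandardCharsets.UTF_8)
            + ", right " + new String(right, StandardCharsets.UTF_8));
      }
    }
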

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
index 9a09b89..ff658d7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
@@ -1,6 +1,6 @@
 Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: -- HIVE-12738 -- We are checking if a MapJoin after a GroupBy will work properly.
-explain vectorization expression
+explain
 select *
 from src
 where not key in
@@ -8,199 +8,65 @@ where not key in
 order by key
 PREHOOK: type: QUERY
 POSTHOOK: query: -- HIVE-12738 -- We are checking if a MapJoin after a GroupBy will work properly.
-explain vectorization expression
+explain
 select *
 from src
 where not key in
 (select key from src)
 order by key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Map 1 <- Map 5 (BROADCAST_EDGE), Reducer 4 (BROADCAST_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 4 <- Map 3 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Map 1 <- Map 5 (BROADCAST_EDGE), Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                    Map Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 
-                        1 
-                      outputColumnNames: _col0, _col1
-                      input vertices:
-                        1 Reducer 4
-                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                      Map Join Operator
-                        condition map:
-                             Left Outer Join0 to 1
-                        keys:
-                          0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col0, _col1, _col3
-                        input vertices:
-                          1 Map 5
-                        Statistics: Num rows: 1219 Data size: 323035 Basic stats: COMPLETE Column stats: COMPLETE
-                        Filter Operator
-                          predicate: _col3 is null (type: boolean)
-                          Statistics: Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
-                          Select Operator
-                            expressions: _col0 (type: string), _col1 (type: string)
-                            outputColumnNames: _col0, _col1
-                            Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-                            Reduce Output Operator
-                              key expressions: _col0 (type: string)
-                              sort order: +
-                              Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-                              value expressions: _col1 (type: string)
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: key is null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: bigint)
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: key (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
-                Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Filter Operator
-                  Filter Vectorization:
-                      className: VectorFilterOperator
-                      native: true
-                      predicateExpression: FilterLongColEqualLongScalar(col 0, val 0) -> boolean
-                  predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: []
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_36]
+        Select Operator [SEL_35] (rows=1 width=178)
+          Output:["_col0","_col1"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_21]
+            Select Operator [SEL_20] (rows=1 width=178)
+              Output:["_col0","_col1"]
+              Filter Operator [FIL_19] (rows=1 width=265)
+                predicate:_col3 is null
+                Map Join Operator [MAPJOIN_28] (rows=1219 width=265)
+                  Conds:MAPJOIN_27._col0=RS_17._col0(Left Outer),Output:["_col0","_col1","_col3"]
+                <-Map 5 [BROADCAST_EDGE] llap
+                  BROADCAST [RS_17]
+                    PartitionCols:_col0
+                    Select Operator [SEL_12] (rows=500 width=87)
+                      Output:["_col0"]
+                      TableScan [TS_11] (rows=500 width=87)
+                        default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                <-Map Join Operator [MAPJOIN_27] (rows=500 width=178)
+                    Conds:(Inner),Output:["_col0","_col1"]
+                  <-Reducer 4 [BROADCAST_EDGE] vectorized, llap
+                    BROADCAST [RS_34]
+                      Select Operator [SEL_33] (rows=1 width=8)
+                        Filter Operator [FIL_32] (rows=1 width=8)
+                          predicate:(_col0 = 0)
+                          Group By Operator [GBY_31] (rows=1 width=8)
+                            Output:["_col0"],aggregations:["count(VALUE._col0)"]
+                          <-Map 3 [SIMPLE_EDGE] llap
+                            SHUFFLE [RS_6]
+                              Group By Operator [GBY_5] (rows=1 width=8)
+                                Output:["_col0"],aggregations:["count()"]
+                                Select Operator [SEL_4] (rows=1 width=87)
+                                  Filter Operator [FIL_25] (rows=1 width=87)
+                                    predicate:key is null
+                                    TableScan [TS_2] (rows=500 width=87)
+                                      default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                  <-Select Operator [SEL_1] (rows=500 width=178)
+                      Output:["_col0","_col1"]
+                      TableScan [TS_0] (rows=500 width=178)
+                        default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
 
 Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: select *

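The q.out hunks like the one above revert EXPLAIN VECTORIZATION EXPRESSION output (the PLAN VECTORIZATION header plus per-operator *Vectorization: blocks) back to plain EXPLAIN, here in its CBO summary form. A hedged sketch of running either statement over JDBC against HiveServer2 (the connection URL and query are placeholders, and the Hive JDBC driver must be on the classpath):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ExplainSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder URL; point it at a real HiveServer2 instance.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          stmt.execute("SET hive.vectorized.execution.enabled=true");
          // Plain EXPLAIN; the reverted patch additionally accepted
          // EXPLAIN VECTORIZATION EXPRESSION for the richer form.
          try (ResultSet rs = stmt.executeQuery(
              "EXPLAIN SELECT key, value FROM src ORDER BY key")) {
            while (rs.next()) {
              System.out.println(rs.getString(1));
            }
          }
        }
      }
    }
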
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
index 8599e97..c4bcbab 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
@@ -211,7 +211,7 @@ POSTHOOK: Lineage: store_sales.ss_sold_time_sk SIMPLE [(store_sales_txt)store_sa
 POSTHOOK: Lineage: store_sales.ss_store_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_store_sk, type:int, comment:null), ]
 POSTHOOK: Lineage: store_sales.ss_ticket_number SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ticket_number, type:int, comment:null), ]
 POSTHOOK: Lineage: store_sales.ss_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_wholesale_cost, type:float, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select 
   ss_ticket_number
 from
@@ -219,7 +219,7 @@ from
 group by ss_ticket_number
 limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select 
   ss_ticket_number
 from
@@ -227,10 +227,6 @@ from
 group by ss_ticket_number
 limit 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -248,24 +244,11 @@ STAGE PLANS:
                 TableScan
                   alias: store_sales
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                   Select Operator
                     expressions: ss_ticket_number (type: int)
                     outputColumnNames: ss_ticket_number
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [9]
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 9
-                          native: false
-                          projectedOutputColumns: []
                       keys: ss_ticket_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -274,55 +257,23 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -373,7 +324,7 @@ POSTHOOK: Input: default@store_sales
 18
 19
 20
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select 
     min(ss_ticket_number) m
 from
@@ -385,7 +336,7 @@ from
 group by ss_ticket_number
 order by m
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select 
     min(ss_ticket_number) m
 from
@@ -397,10 +348,6 @@ from
 group by ss_ticket_number
 order by m
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -419,24 +366,11 @@ STAGE PLANS:
                 TableScan
                   alias: store_sales
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                   Select Operator
                     expressions: ss_ticket_number (type: int)
                     outputColumnNames: ss_ticket_number
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [9]
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 9
-                          native: false
-                          projectedOutputColumns: []
                       keys: ss_ticket_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -445,51 +379,19 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: min(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFMinLong(col 0) -> int
-                      className: VectorGroupByOperator
-                      vectorOutput: true
-                      keyExpressions: col 0
-                      native: false
-                      projectedOutputColumns: [0]
                   keys: _col0 (type: int)
                   mode: complete
                   outputColumnNames: _col0, _col1
@@ -497,43 +399,20 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col1 (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1]
                     Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -654,7 +533,7 @@ POSTHOOK: Input: default@store_sales
 80
 81
 82
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
     ss_ticket_number, sum(ss_item_sk), sum(q)
 from
@@ -666,7 +545,7 @@ from
 group by ss_ticket_number
 order by ss_ticket_number
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
     ss_ticket_number, sum(ss_item_sk), sum(q)
 from
@@ -678,10 +557,6 @@ from
 group by ss_ticket_number
 order by ss_ticket_number
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -700,26 +575,12 @@ STAGE PLANS:
                 TableScan
                   alias: store_sales
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                   Select Operator
                     expressions: ss_ticket_number (type: int), ss_item_sk (type: int), ss_quantity (type: int)
                     outputColumnNames: ss_ticket_number, ss_item_sk, ss_quantity
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [9, 2, 10]
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ss_quantity)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 10) -> int
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 9, col 2
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: ss_ticket_number (type: int), ss_item_sk (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -728,42 +589,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: int)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 2) -> int
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: int), KEY._col1 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -771,20 +605,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: int), _col0 (type: int), _col2 (type: int)
                   outputColumnNames: _col0, _col1, _col2
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0, 2]
                   Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: sum(_col0), sum(_col2)
-                    Group By Vectorization:
-                        aggregators: VectorUDAFSumLong(col 1) -> bigint, VectorUDAFSumLong(col 2) -> bigint
-                        className: VectorGroupByOperator
-                        vectorOutput: true
-                        keyExpressions: col 0
-                        native: false
-                        projectedOutputColumns: [0, 1]
                     keys: _col1 (type: int)
                     mode: complete
                     outputColumnNames: _col0, _col1, _col2
@@ -792,36 +615,17 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
                 Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -942,7 +746,7 @@ POSTHOOK: Input: default@store_sales
 80	151471	704
 81	105109	429
 82	55611	254
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
     ss_ticket_number, ss_item_sk, sum(q)
 from
@@ -954,7 +758,7 @@ from
 group by ss_ticket_number, ss_item_sk
 order by ss_ticket_number, ss_item_sk
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
     ss_ticket_number, ss_item_sk, sum(q)
 from
@@ -966,10 +770,6 @@ from
 group by ss_ticket_number, ss_item_sk
 order by ss_ticket_number, ss_item_sk
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -988,26 +788,12 @@ STAGE PLANS:
                 TableScan
                   alias: store_sales
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                   Select Operator
                     expressions: ss_ticket_number (type: int), ss_item_sk (type: int), ss_quantity (type: int)
                     outputColumnNames: ss_ticket_number, ss_item_sk, ss_quantity
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [9, 2, 10]
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ss_quantity)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 10) -> int
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 9, col 2
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: ss_ticket_number (type: int), ss_item_sk (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -1016,41 +802,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: int)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 2) -> int
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: int), KEY._col1 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -1058,20 +818,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: int), _col0 (type: int), _col2 (type: int)
                   outputColumnNames: _col0, _col1, _col2
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0, 2]
                   Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: sum(_col2)
-                    Group By Vectorization:
-                        aggregators: VectorUDAFSumLong(col 2) -> bigint
-                        className: VectorGroupByOperator
-                        vectorOutput: true
-                        keyExpressions: col 0, col 1
-                        native: false
-                        projectedOutputColumns: [0]
                     keys: _col1 (type: int), _col0 (type: int)
                     mode: complete
                     outputColumnNames: _col0, _col1, _col2
@@ -1079,36 +828,17 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: _col0 (type: int), _col1 (type: int)
                       sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
                 Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

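Even in the reverted plans above, vertices still report "Execution mode: vectorized, llap", so vectorization can still be confirmed from plain EXPLAIN text. A small sketch of scanning a plan for those markers (the input is a hard-coded sample, not a live plan):

    import java.util.List;

    public class VectorizedMarkerScan {
      public static void main(String[] args) {
        List<String> plan = List.of(
            "        Reducer 2 ",
            "            Execution mode: vectorized, llap",
            "        Reducer 3 ",
            "            Execution mode: llap");
        // Count vertices whose execution mode includes "vectorized".
        long vectorized = plan.stream()
            .filter(line -> line.trim().startsWith("Execution mode:"))
            .filter(line -> line.contains("vectorized"))
            .count();
        System.out.println(vectorized + " vectorized vertex(es)");
      }
    }
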
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
index ef3073c..8e55ce3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
@@ -127,20 +127,16 @@ POSTHOOK: Lineage: store.s_street_type SIMPLE [(store_txt)store_txt.FieldSchema(
 POSTHOOK: Lineage: store.s_suite_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_suite_number, type:string, comment:null), ]
 POSTHOOK: Lineage: store.s_tax_precentage SIMPLE [(store_txt)store_txt.FieldSchema(name:s_tax_precentage, type:decimal(5,2), comment:null), ]
 POSTHOOK: Lineage: store.s_zip SIMPLE [(store_txt)store_txt.FieldSchema(name:s_zip, type:string, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s_store_id
  from store
  group by s_store_id with rollup
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s_store_id
  from store
  group by s_store_id with rollup
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -174,19 +170,8 @@ STAGE PLANS:
                         Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: GROUPBY operator: Grouping sets not supported
-                vectorized: false
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: GROUPBY operator: Pruning grouping set id not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -227,20 +212,16 @@ AAAAAAAAEAAAAAAA
 AAAAAAAAHAAAAAAA
 AAAAAAAAIAAAAAAA
 AAAAAAAAKAAAAAAA
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s_store_id, GROUPING__ID
  from store
  group by s_store_id with rollup
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s_store_id, GROUPING__ID
  from store
  group by s_store_id with rollup
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -274,29 +255,10 @@ STAGE PLANS:
                         Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: GROUPBY operator: Grouping sets not supported
-                vectorized: false
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -304,16 +266,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
index 45cf8e6..555340d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
@@ -1,13 +1,9 @@
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT cboolean1, IF (cboolean1, 'first', 'second') FROM alltypesorc WHERE cboolean1 IS NOT NULL AND cboolean1 ORDER BY cboolean1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT cboolean1, IF (cboolean1, 'first', 'second') FROM alltypesorc WHERE cboolean1 IS NOT NULL AND cboolean1 ORDER BY cboolean1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -25,68 +21,29 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36700 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsTrue(col 10) -> boolean, SelectColumnIsNotNull(col 10) -> boolean) -> boolean
                     predicate: (cboolean1 and cboolean1 is not null) (type: boolean)
                     Statistics: Num rows: 4587 Data size: 13704 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cboolean1 (type: boolean), if(cboolean1, 'first', 'second') (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [10, 12]
-                          selectExpressions: IfExprStringScalarStringScalar(col 10, val first, val second) -> 12:String
                       Statistics: Num rows: 4587 Data size: 857712 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: boolean)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 4587 Data size: 857712 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 4587 Data size: 857712 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 4587 Data size: 857712 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
index d7fa1c9..e939c67 100644
--- a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
@@ -181,20 +181,16 @@ POSTHOOK: Lineage: customer_demographics.cd_gender SIMPLE [(customer_demographic
 POSTHOOK: Lineage: customer_demographics.cd_marital_status SIMPLE [(customer_demographics_txt)customer_demographics_txt.FieldSchema(name:cd_marital_status, type:string, comment:null), ]
 POSTHOOK: Lineage: customer_demographics.cd_purchase_estimate SIMPLE [(customer_demographics_txt)customer_demographics_txt.FieldSchema(name:cd_purchase_estimate, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[13][bigTable=store_sales] in task 'Map 2' is a cross product
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select count(1) from customer_demographics,store_sales
 where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or
        (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U'))
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select count(1) from customer_demographics,store_sales
 where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or
        (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U'))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -213,120 +209,53 @@ STAGE PLANS:
                 TableScan
                   alias: customer_demographics
                   Statistics: Num rows: 200 Data size: 74200 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                   Reduce Output Operator
                     sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false, Uniform Hash IS false
                     Statistics: Num rows: 200 Data size: 74200 Basic stats: COMPLETE Column stats: NONE
                     value expressions: cd_demo_sk (type: int), cd_marital_status (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: store_sales
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
                     keys:
                       0 
                       1 
-                    Map Join Vectorization:
-                        className: VectorMapJoinOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                        nativeConditionsNotMet: Not empty key IS false
                     outputColumnNames: _col0, _col2, _col16
                     input vertices:
                       0 Map 1
                     Statistics: Num rows: 200000 Data size: 92055200 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
-                      Filter Vectorization:
-                          className: VectorFilterOperator
-                          native: true
-                          predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColEqualLongColumn(col 0, col 2) -> boolean, FilterStringGroupColEqualStringScalar(col 1, val M) -> boolean) -> boolean, FilterExprAndExpr(children: FilterLongColEqualLongColumn(col 0, col 2) -> boolean, FilterStringGroupColEqualStringScalar(col 1, val U) -> boolean) -> boolean) -> boolean
                       predicate: (((_col0 = _col16) and (_col2 = 'M')) or ((_col0 = _col16) and (_col2 = 'U'))) (type: boolean)
                       Statistics: Num rows: 100000 Data size: 46027600 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: []
                         Statistics: Num rows: 100000 Data size: 46027600 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count(1)
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 3:long) -> bigint
-                              className: VectorGroupByOperator
-                              vectorOutput: true
-                              native: false
-                              projectedOutputColumns: [0]
                           mode: hash
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           Reduce Output Operator
                             sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkOperator
-                                native: false
-                                nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                                nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false, Uniform Hash IS false
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
index 9e185c6..3d67664 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
@@ -1231,18 +1231,14 @@ POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_web_page_sk SIMPLE [
 POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
 PREHOOK: query: ------------------------------------------------------------------------------------------
 
-explain vectorization expression
+explain
 select count(distinct ws_order_number) from web_sales
 PREHOOK: type: QUERY
 POSTHOOK: query: ------------------------------------------------------------------------------------------
 
-explain vectorization expression
+explain
 select count(distinct ws_order_number) from web_sales
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1261,24 +1257,11 @@ STAGE PLANS:
                 TableScan
                   alias: web_sales
                   Statistics: Num rows: 2000 Data size: 3520000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
                   Select Operator
                     expressions: ws_order_number (type: int)
                     outputColumnNames: ws_order_number
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [16]
                     Statistics: Num rows: 2000 Data size: 3520000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 16
-                          native: false
-                          projectedOutputColumns: []
                       keys: ws_order_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -1287,88 +1270,36 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 3520000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 1760000 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0) -> bigint
-                      className: VectorGroupByOperator
-                      vectorOutput: true
-                      native: false
-                      projectedOutputColumns: [0]
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: Uniform Hash IS false
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
index fc82926..a7a74c3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
@@ -97,14 +97,10 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -197,14 +193,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 -17045922556
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -222,67 +214,30 @@ STAGE PLANS:
                 TableScan
                   alias: over1korc
                   Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Select Operator
                     expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                     Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
index 1a31067..6faf453 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
@@ -23,7 +23,7 @@ POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchem
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
     FROM decimal_vgby
@@ -33,17 +33,13 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
     FROM decimal_vgby
     GROUP BY cint
     HAVING COUNT(*) > 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -61,26 +57,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_vgby
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3, 1, 2]
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 1) -> bigint, VectorUDAFMaxDecimal(col 1) -> decimal(20,10), VectorUDAFMinDecimal(col 1) -> decimal(20,10), VectorUDAFSumDecimal(col 1) -> decimal(38,18), VectorUDAFCount(col 2) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(23,14), VectorUDAFMinDecimal(col 2) -> decimal(23,14), VectorUDAFSumDecimal(col 2) -> decimal(38,18), VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 3
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
@@ -89,65 +71,28 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), sum(VALUE._col3), count(VALUE._col4), max(VALUE._col5), min(VALUE._col6), sum(VALUE._col7), count(VALUE._col8)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(20,10), VectorUDAFMinDecimal(col 3) -> decimal(20,10), VectorUDAFSumDecimal(col 4) -> decimal(38,18), VectorUDAFCountMerge(col 5) -> bigint, VectorUDAFMaxDecimal(col 6) -> decimal(23,14), VectorUDAFMinDecimal(col 7) -> decimal(23,14), VectorUDAFSumDecimal(col 8) -> decimal(38,18), VectorUDAFCountMerge(col 9) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                 Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  Filter Vectorization:
-                      className: VectorFilterOperator
-                      native: true
-                      predicateExpression: FilterLongColGreaterLongScalar(col 9, val 1) -> boolean
                   predicate: (_col9 > 1) (type: boolean)
                   Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                     Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -187,7 +132,7 @@ POSTHOOK: Input: default@decimal_vgby
 762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
 NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
     FROM decimal_vgby
@@ -195,17 +140,13 @@ EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
     HAVING COUNT(*) > 1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Now add the others...
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
     FROM decimal_vgby
     GROUP BY cint
     HAVING COUNT(*) > 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -223,27 +164,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_vgby
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3, 1, 2]
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 1) -> bigint, VectorUDAFMaxDecimal(col 1) -> decimal(20,10), VectorUDAFMinDecimal(col 1) -> decimal(20,10), VectorUDAFSumDecimal(col 1) -> decimal(38,18), VectorUDAFAvgDecimal(col 1) -> struct<count:bigint,sum:decimal(30,10)>, VectorUDAFStdPopDecimal(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampDecimal(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCount(col 2) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(23,14), VectorUDAFMinDecimal(col 2) -> decimal(23,14), VectorUDAFSumDecimal(col 2) -> decimal(38,18), VectorUDAFAvgDecimal(col 2) -> struct<count:bigint,sum:decimal(33,14)>, VectorUDAFStdPopDecimal(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampDecimal(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          keyExpressions: col 3
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgDecimal(col 1) -> struct<count:bigint,sum:decimal(30,10)> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopDecimal(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampDecimal(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDecimal(col 2) -> struct<count:bigint,sum:decimal(33,14)> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopDecimal(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampDecimal(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
@@ -256,21 +182,8 @@ STAGE PLANS:
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: struct<count:bigint,sum:decimal(33,14),input:decimal(23,14)>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: struct<count:bigint,sum:double,variance:double>), _col15 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)> of Column[VALUE._col4] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), sum(VALUE._col3), avg(VALUE._col4), stddev_pop(VALUE._col5), stddev_samp(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9), sum(VALUE._col10), avg(VALUE._col11), stddev_pop(VALUE._col12), stddev_samp(VALUE._col13), count(VALUE._col14)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
index 5cce027..16d9929 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
@@ -1,76 +1,25 @@
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10
+PREHOOK: query: EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10
+POSTHOOK: query: EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 638316 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 5) -> boolean, SelectColumnIsNotNull(col 2) -> boolean, SelectColumnIsNotNull(col 10) -> boolean, SelectColumnIsNotNull(col 8) -> boolean) -> boolean
-                    predicate: (cdouble is not null and cint is not null and cboolean1 is not null and ctimestamp1 is not null) (type: boolean)
-                    Statistics: Num rows: 5112 Data size: 265564 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cdouble (type: double), cint (type: int), cboolean1 (type: boolean), ctimestamp1 (type: timestamp), CAST( cdouble AS decimal(20,10)) (type: decimal(20,10)), CAST( cint AS decimal(23,14)) (type: decimal(23,14)), CAST( cboolean1 AS decimal(5,2)) (type: decimal(5,2)), CAST( ctimestamp1 AS decimal(15,0)) (type: decimal(15,0))
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [5, 2, 10, 8, 12, 13, 14, 15]
-                          selectExpressions: CastDoubleToDecimal(col 5) -> 12:decimal(20,10), CastLongToDecimal(col 2) -> 13:decimal(23,14), CastLongToDecimal(col 10) -> 14:decimal(5,2), CastTimestampToDecimal(col 8) -> 15:decimal(15,0)
-                      Statistics: Num rows: 5112 Data size: 2555740 Basic stats: COMPLETE Column stats: COMPLETE
-                      Limit
-                        Number of rows: 10
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                        Statistics: Num rows: 10 Data size: 5008 Basic stats: COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-                          Statistics: Num rows: 10 Data size: 5008 Basic stats: COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: (cdouble is not null and cint is not null and cboolean1 is not null and ctimestamp1 is not null) (type: boolean)
+            Select Operator
+              expressions: cdouble (type: double), cint (type: int), cboolean1 (type: boolean), ctimestamp1 (type: timestamp), CAST( cdouble AS decimal(20,10)) (type: decimal(20,10)), CAST( cint AS decimal(23,14)) (type: decimal(23,14)), CAST( cboolean1 AS decimal(5,2)) (type: decimal(5,2)), CAST( ctimestamp1 AS decimal(15,0)) (type: decimal(15,0))
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
index 16232c2..d37b973 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
@@ -15,18 +15,14 @@ POSTHOOK: Output: default@decimal_test
 POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+PREHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -44,74 +40,32 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_test
                   Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterDecimalColGreaterDecimalScalar(col 1, val 0) -> boolean, FilterDecimalColLessDecimalScalar(col 1, val 12345.5678) -> boolean, FilterDecimalColNotEqualDecimalScalar(col 2, val 0) -> boolean, FilterDecimalColGreaterDecimalScalar(col 2, val 1000) -> boolean, SelectColumnIsNotNull(col 0) -> boolean) -> boolean
                     predicate: ((cdecimal1 > 0) and (cdecimal1 < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
                     Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((cdecimal1 + 2.34) / cdecimal2) (type: decimal(38,23)), (cdecimal1 * (cdecimal2 / 3.4)) (type: decimal(38,28)), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                          selectExpressions: DecimalColAddDecimalColumn(col 1, col 2) -> 3:decimal(25,14), DecimalColSubtractDecimalColumn(col 1, col 4)(children: DecimalScalarMultiplyDecimalColumn(val 2, col 2) -> 4:decimal(25,14)) -> 5:decimal(26,14), DecimalColDivideDecimalColumn(col 6, col 2)(children: DecimalColAddDecimalScalar(col 1, val 2.34) -> 6:decimal(21,10)) -> 7:decimal(38,23), DecimalColMultiplyDecimalColumn(col 1, col 8)(children: DecimalColDivideDecimalScalar(col 2, val 3.4) -> 8:decimal(28,18)) -> 9:decimal(38,28), DecimalColModuloDecimalScalar(col 1, val 10) -> 10:decimal(12,10), CastDecimalToLong(col 1) -> 11:long, CastDecimalToLong(col 2) -> 12:long, CastDecimalToLong(col 2) -> 13:long, CastDecimalToLong(col 1) -> 14:long, CastDecimalToBoolean(col 1) -> 15:long, CastDecimalToDouble(col 2) -> 16:double, CastDecimalToDouble(col 1) -> 17:double, CastDecimalToString(col 2) -> 18:String, CastDecimalToTimestamp(col 1) -> 19:timestamp
                       Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(25,14)), _col1 (type: decimal(26,14)), _col2 (type: decimal(38,23)), _col3 (type: decimal(38,28)), _col4 (type: decimal(12,10)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
                         sort order: ++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(25,14)), KEY.reducesinkkey1 (type: decimal(26,14)), KEY.reducesinkkey2 (type: decimal(38,23)), KEY.reducesinkkey3 (type: decimal(38,28)), KEY.reducesinkkey4 (type: decimal(12,10)), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: smallint), KEY.reducesinkkey7 (type: tinyint), KEY.reducesinkkey8 (type: bigint), KEY.reducesinkkey9 (type: boolean), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: string), KEY.reducesinkkey13 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
                 Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 10 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 10 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
index 6666923..361c46b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
@@ -72,16 +72,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
 POSTHOOK: Output: default@t2
 POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -99,23 +95,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1)(children: CastDecimalToBoolean(col 0) -> 1:long) -> boolean
                     predicate: dec is not null (type: boolean)
                     Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dec (type: decimal(4,2))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -123,21 +108,12 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: decimal(6,2))
                           1 _col0 (type: decimal(6,2))
-                        Map Join Vectorization:
-                            className: VectorMapJoinOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            nativeConditionsNotMet: Supports Key Types IS false
-                            nativeNotSupportedKeyTypes: DECIMAL
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 2
                         Statistics: Num rows: 1153 Data size: 129236 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1153 Data size: 129236 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -145,56 +121,25 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1)(children: CastDecimalToBoolean(col 0) -> 1:long) -> boolean
                     predicate: dec is not null (type: boolean)
                     Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dec (type: decimal(4,0))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(6,2))
                         sort order: +
                         Map-reduce partition columns: _col0 (type: decimal(6,2))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

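Editor's note: one piece of information the revert removes from vector_decimal_mapjoin.q.out is the deleted Map Join Vectorization block, which recorded why this join could not use the native vectorized map join (Supports Key Types IS false, nativeNotSupportedKeyTypes: DECIMAL) and fell back to the non-native VectorMapJoinOperator. The query itself, unchanged apart from losing the clause:

    -- t1.dec is decimal(4,2) and t2.dec is decimal(4,0); both sides are joined on
    -- the common type decimal(6,2), a key type the removed annotations flag as
    -- unsupported by the native vectorized map join:
    explain
    select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
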
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
index 4a2ae48..6d5b578 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
@@ -14,7 +14,7 @@ POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.F
 POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression 
+explain 
 select
    cdecimal1
   ,Round(cdecimal1, 2)
@@ -53,7 +53,7 @@ and sin(cdecimal1) >= -1.0
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression 
+explain 
 select
    cdecimal1
   ,Round(cdecimal1, 2)
@@ -90,69 +90,22 @@ where cbigint % 500 = 0
 -- test use of a math function in the WHERE clause
 and sin(cdecimal1) >= -1.0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_test
-                  Statistics: Num rows: 12288 Data size: 2201752 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 4, val 0)(children: LongColModuloLongScalar(col 0, val 500) -> 4:long) -> boolean, FilterDoubleColGreaterEqualDoubleScalar(col 6, val -1.0)(children: FuncSinDoubleToDouble(col 5)(children: CastDecimalToDouble(col 2) -> 5:double) -> 6:double) -> boolean) -> boolean
-                    predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
-                    Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 7, 8, 9, 10, 5, 11, 12, 13, 15, 6, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 2, 28, 4, 29]
-                          selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2, decimalPlaces 2) -> 7:decimal(13,2), FuncRoundDecimalToDecimal(col 2) -> 8:decimal(11,0), FuncFloorDecimalToDecimal(col 2) -> 9:decimal(11,0), FuncCeilDecimalToDecimal(col 2) -> 10:decimal(11,0), RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 58)(children: FuncExpDoubleToDouble(col 5)(children: CastDecimalToDouble(col 2) -> 5:double) -> 6:double) -> 5:double, FuncLnDoubleToDouble(col 6)(children: CastDecimalToDouble(col 2) -> 6:double) -> 11:double, FuncLog10DoubleToDouble(col 6)(children: CastDecimalToDouble(col 2) -> 6:double) -> 12:double, FuncLog2DoubleToDouble(col 6)(children: CastDecimalToDouble(col 2) -> 6:double) -> 13:double, FuncLog2DoubleToDouble(col 6)(children: CastDecimalToDouble(col 14)(children: DecimalColSubtractDecimalScalar(col 2, val 15601) -> 14:decimal(21,10)) -> 6:double) -> 15:double, VectorUDFAdaptor(log(2, cdecimal1)) -> 6:Double, VectorUDFAdaptor(power(log2(cdecimal1), 2))(children: FuncLog2DoubleToDouble(col 16)(children: CastDecimalToDouble(col 2) -> 16:double) -> 17:double) -> 16:Double, VectorUDFAdaptor(power(log2(cdecimal1), 2))(children: FuncLog2DoubleToDouble(col 17)(children: CastDecimalToDouble(col 2) -> 17:double) -> 18:double) -> 17:Double, FuncSqrtDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 19:double, FuncAbsDecimalToDecimal(col 2) -> 20:decimal(20,10), FuncSinDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 21:double, FuncASinDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 22:double, FuncCosDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 23:double, FuncACosDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 24:double, FuncATanDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 25:double, FuncDegreesDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 26:double, FuncRadiansDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 27:double, FuncNegateDecimalToDecimal(col 2) -> 28:decimal(20,10), FuncSignDecimalToLong(col 2) -> 4:long, FuncCosDoubleToDouble(col 18)(children: DoubleColAddDoubleScalar(col 29, val 3.14159)(children: DoubleColUnaryMinus(col 18)(children: FuncSinDoubleToDouble(col 29)(children: FuncLnDoubleToDouble(col 18)(children: CastDecimalToDouble(col 2) -> 18:double) -> 29:double) -> 18:double) -> 29:double) -> 18:double) -> 29:double
-                      Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: decimal_test
+          Filter Operator
+            predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
+            Select Operator
+              expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
+              ListSink
 
 PREHOOK: query: select
    cdecimal1

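Editor's note: besides dropping the annotations, the math-funcs hunk above also loses the only record that two of the calls ran through the row-mode adaptor (usesVectorUDFAdaptor: true): the removed selectExpressions wrap log(2, cdecimal1) and power(log2(cdecimal1), 2) in VectorUDFAdaptor while every other function maps to a dedicated vector expression. A minimal query isolating the two adaptor cases (a sketch built from the removed output, not part of the test itself):

    -- Per the deleted selectExpressions, these are the two calls with no native
    -- vectorized implementation in this plan; they fall back to VectorUDFAdaptor:
    explain
    select log(2, cdecimal1), power(log2(cdecimal1), 2)
    from decimal_test;
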
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
index 90f80a3..5583874 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
@@ -545,14 +545,10 @@ NULL	NULL
 123456789.0123456789	15241578753238836.75019051998750190521
 1234567890.1234560000	NULL
 1234567890.1234567890	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
+PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
+POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -570,26 +566,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_precision
                   Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                     expressions: dec (type: decimal(20,10))
                     outputColumnNames: dec
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: avg(dec), sum(dec)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgDecimal(col 0) -> struct<count:bigint,sum:decimal(30,10)>, VectorUDAFSumDecimal(col 0) -> decimal(38,18)
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          native: false
-                          projectedOutputColumns: [0, 1]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgDecimal(col 0) -> struct<count:bigint,sum:decimal(30,10)> output type STRUCT requires PRIMITIVE IS false
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
@@ -599,21 +581,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>), _col1 (type: decimal(30,10))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), sum(VALUE._col1)

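Editor's note: the decimal_precision hunk is the one place in this part where a whole stage was non-vectorized, and the revert removes the explanation along with the annotation: the deleted Reduce Vectorization block carried a notVectorizedReason pointing at avg's struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)> partial result, which plain EXPLAIN no longer surfaces. The affected query, post-revert:

    -- The map side still runs vectorized, but Reducer 2 stays in row mode because
    -- AVG's struct-typed partial aggregate is not a supported vector column type
    -- (per the removed notVectorizedReason above):
    EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
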
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
index b99935d..4063199 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
@@ -30,16 +30,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_txt
 #### A masked pattern was here ####
 101
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_txt order by dec
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_txt order by dec
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -68,33 +64,15 @@ STAGE PLANS:
                       value expressions: _col1 (type: decimal(11,0))
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -116,16 +94,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_txt
 #### A masked pattern was here ####
 101	100
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -154,33 +128,15 @@ STAGE PLANS:
                       value expressions: _col0 (type: decimal(10,0))
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -230,16 +186,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_rc
 #### A masked pattern was here ####
 101
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_rc order by dec
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_rc order by dec
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -268,33 +220,15 @@ STAGE PLANS:
                       value expressions: _col1 (type: decimal(11,0))
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.row.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.RCFileInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -316,16 +250,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_rc
 #### A masked pattern was here ####
 101	100
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -354,33 +284,15 @@ STAGE PLANS:
                       value expressions: _col0 (type: decimal(10,0))
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.row.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.RCFileInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -430,16 +342,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_orc
 #### A masked pattern was here ####
 101
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_orc order by dec
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_orc order by dec
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -457,61 +365,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_orc
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                     expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
-                        selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 1:decimal(11,0)
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(10,0))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(11,0))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(10,0)), VALUE._col0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -533,16 +406,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_orc
 #### A masked pattern was here ####
 101	100
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -560,60 +429,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_orc
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                     expressions: dec (type: decimal(10,0))
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: round(_col0, -1) (type: decimal(11,0))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [1, 0]
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

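Editor's note: the four decimal_round plans above are the same query over three storage formats, and the deleted Map Vectorization blocks made the difference explicit: the text and RCFile tables report enabled: false (hive.vectorized.use.vector.serde.deserialize and hive.vectorized.use.row.serde.deserialize IS false respectively), while only the ORC table vectorizes the map side. The ORC variant, for comparison:

    -- Only decimal_tbl_orc satisfies hive.vectorized.use.vectorized.input.format, so
    -- only its maps show Execution mode: vectorized in the plans above:
    explain
    select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1);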

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
index ef19bad..511bd79 100644
--- a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
@@ -32,16 +32,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@orc_table_2a
 POSTHOOK: Lineage: orc_table_2a.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -57,45 +53,18 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: c:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -108,23 +77,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: a (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -132,14 +90,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinInnerBigOnlyLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col1
                         input vertices:
                           0 Map 1
@@ -147,35 +97,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: int)
                           outputColumnNames: _col0
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0]
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -196,16 +126,12 @@ POSTHOOK: Input: default@orc_table_1a
 POSTHOOK: Input: default@orc_table_2a
 #### A masked pattern was here ####
 3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -221,56 +147,23 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: a (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        Group By Vectorization:
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            keyExpressions: col 0
-                            native: false
-                            projectedOutputColumns: []
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
-                          Spark Hash Table Sink Vectorization:
-                              className: VectorSparkHashTableSinkOperator
-                              native: true
                           keys:
                             0 _col0 (type: int)
                             1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -283,23 +176,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -307,42 +189,18 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinLeftSemiLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col0
                         input vertices:
                           1 Map 2
                         Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: c:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -399,16 +257,12 @@ POSTHOOK: Input: default@values__tmp__table__4
 POSTHOOK: Output: default@orc_table_2b
 POSTHOOK: Lineage: orc_table_2b.c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: orc_table_2b.v2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -424,45 +278,18 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -475,23 +302,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -499,14 +315,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col1, _col2
                         input vertices:
                           1 Map 2
@@ -514,36 +322,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: string), _col2 (type: int)
                           outputColumnNames: _col0, _col1
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 0]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 
@@ -564,16 +351,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -589,45 +372,18 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -640,23 +396,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -664,44 +409,18 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [1]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 1, 2]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           1 Map 2
                         Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 
@@ -722,16 +441,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	3	3	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -747,45 +462,18 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -798,23 +486,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -822,15 +499,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           1 Map 2
@@ -838,37 +506,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col2 (type: string), (_col3 * 2) (type: int), (_col0 * 5) (type: int), _col1 (type: string)
                           outputColumnNames: _col0, _col1, _col2, _col3
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 3, 4, 1]
-                              selectExpressions: LongColMultiplyLongScalar(col 0, val 2) -> 3:long, LongColMultiplyLongScalar(col 0, val 5) -> 4:long
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String, bigint, bigint
             Local Work:
               Map Reduce Local Work
 
@@ -889,16 +535,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	6	15	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -914,45 +556,18 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -965,23 +580,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -989,15 +593,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2
                         input vertices:
                           1 Map 2
@@ -1005,36 +600,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col2 (type: string), _col1 (type: string), _col0 (type: int)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 1, 0]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 
@@ -1055,16 +629,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	THREE	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -1080,45 +650,18 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -1131,23 +674,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1155,15 +687,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [1, 2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col1, _col2, _col3
                         input vertices:
                           1 Map 2
@@ -1171,36 +694,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col3 (type: int), _col2 (type: string), _col1 (type: string)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0, 2, 1]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 
@@ -1221,16 +723,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 3	three	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -1246,45 +744,18 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -1297,23 +768,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1321,15 +781,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [1]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col2, _col3
                         input vertices:
                           1 Map 2
@@ -1337,36 +788,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: string), _col3 (type: string), _col2 (type: int)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0, 2, 1]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 
@@ -1387,16 +817,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	THREE	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -1412,45 +838,18 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -1463,23 +862,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1487,15 +875,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [1]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col3
                         input vertices:
                           1 Map 2
@@ -1503,36 +882,15 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: int), _col0 (type: string), _col3 (type: string)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [1, 0, 2]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
             Local Work:
               Map Reduce Local Work
 

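For context: the hunks above strip the vectorization annotations from a small vectorized map-join plan over two test tables (one with columns c:int, v2:string, the other v1:string, a:int, per the removed rowBatchContext lines). The .q source is not part of this hunk, so the HiveQL below is only a sketch of a query shape that yields such a plan; the alias t2 and the exact join/filter wording are assumptions.

    -- Hypothetical reconstruction; t1, the column names, and the predicates
    -- come from the hunks above, everything else is assumed.
    SET hive.vectorized.execution.enabled=true;  -- "Execution mode: vectorized"
    SET hive.auto.convert.join=true;             -- enables the map join / hash-table sink
    EXPLAIN
    SELECT t1.a, t1.v1, t2.v2
    FROM t1
    JOIN t2 ON t1.a = t2.c
    WHERE t1.a > 2 AND t2.c > 2;
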
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
index 91af229..c08fbda 100644
--- a/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: explain vectorization 
+PREHOOK: query: explain 
 select count(*) from (select c.ctinyint 
 from alltypesorc c
 left outer join alltypesorc cd
@@ -7,7 +7,7 @@ left outer join alltypesorc hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization 
+POSTHOOK: query: explain 
 select count(*) from (select c.ctinyint 
 from alltypesorc c
 left outer join alltypesorc cd
@@ -16,10 +16,6 @@ left outer join alltypesorc hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -44,14 +40,6 @@ STAGE PLANS:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -68,14 +56,6 @@ STAGE PLANS:
                         0 _col0 (type: tinyint)
                         1 _col0 (type: tinyint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Local Work:
               Map Reduce Local Work
 
@@ -123,25 +103,10 @@ STAGE PLANS:
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Local Work:
               Map Reduce Local Work
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

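The blocks removed here were purely informational: all three map scans of alltypesorc vectorized natively, and the final count(*) reducer vectorized as well. A sketch of the settings the deleted Reduce Vectorization block was keyed on (both properties appear verbatim in the deleted enableConditionsMet line); the cd join condition below is an assumption inferred from the int-typed map-join keys, since that line of the query falls outside the hunk:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    EXPLAIN
    select count(*) from (select c.ctinyint
    from alltypesorc c
    left outer join alltypesorc cd
      on cd.cint = c.cint          -- assumed: this line is outside the hunk
    left outer join alltypesorc hd
      on hd.ctinyint = c.ctinyint
    ) t1;
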
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index f4a1e6e..012c3eb 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -4,7 +4,7 @@ PREHOOK: query: -- SORT_QUERY_RESULTS
 -- Query copied from subquery_in.q
 
 -- non agg, non corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
@@ -16,16 +16,12 @@ POSTHOOK: query: -- SORT_QUERY_RESULTS
 -- Query copied from subquery_in.q
 
 -- non agg, non corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR')
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -59,10 +55,6 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: int)
                             1 _col0 (type: int)
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
             Local Work:
               Map Reduce Local Work
         Map 3 
@@ -83,37 +75,17 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 4 
             Execution mode: vectorized
             Local Work:
               Map Reduce Local Work
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                 Spark HashTable Sink Operator
-                  Spark Hash Table Sink Vectorization:
-                      className: VectorSparkHashTableSinkOperator
-                      native: true
                   keys:
                     0 _col1 (type: int)
                     1 _col0 (type: int)
@@ -165,10 +137,6 @@ STAGE PLANS:
                                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
             Local Work:
               Map Reduce Local Work
 
@@ -203,23 +171,19 @@ POSTHOOK: Input: default@lineitem
 64128	9141
 82704	7721
 PREHOOK: query: -- non agg, corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- non agg, corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -253,10 +217,6 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: int), 1 (type: int)
                             1 _col0 (type: int), _col1 (type: int)
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
             Local Work:
               Map Reduce Local Work
         Map 3 
@@ -277,37 +237,17 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 4 
             Execution mode: vectorized
             Local Work:
               Map Reduce Local Work
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                 Spark HashTable Sink Operator
-                  Spark Hash Table Sink Vectorization:
-                      className: VectorSparkHashTableSinkOperator
-                      native: true
                   keys:
                     0 _col1 (type: int)
                     1 _col0 (type: int)
@@ -359,10 +299,6 @@ STAGE PLANS:
                                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
             Local Work:
               Map Reduce Local Work
 

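The deleted Map Vectorization blocks in this file all read enabled: false with enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false — lineitem is a TextInputFormat table, so only the reducer vectorized. A sketch of the knob involved (whether the test intends the map side to vectorize is not shown here):

    SET hive.vectorized.execution.enabled=true;
    -- Named in the deleted enabledConditionsNotMet lines; turning it on
    -- allows text-format (row-serde) tables to be vectorized on read:
    SET hive.vectorized.use.vector.serde.deserialize=true;
    EXPLAIN
    select p.p_partkey, li.l_suppkey
    from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
    where li.l_linenumber = 1 and
     li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR');
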
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
index cbf7d03..4710a73 100644
--- a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
@@ -101,16 +101,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select bo, max(b) from vectortab2korc group by bo order by bo desc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select bo, max(b) from vectortab2korc group by bo order by bo desc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -128,26 +124,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: bo (type: boolean), b (type: bigint)
                     outputColumnNames: bo, b
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [7, 3]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(b)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMaxLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 7
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: bo (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -156,41 +138,14 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMaxLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -198,36 +153,17 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: boolean)
                   sort order: -
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: bigint)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat


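Worth noting in the annotations deleted above: both reduce sinks met every native condition except "Uniform Hash IS false", so they ran as the non-native VectorReduceSinkOperator, while the map-side and reduce-side group-bys did vectorize. The properties those blocks were driven by, each named verbatim in the deleted lines:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    SET hive.vectorized.execution.reducesink.new.enabled=true;
    EXPLAIN
    select bo, max(b) from vectortab2korc group by bo order by bo desc;
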
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index 79638c1..dbaf14d 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -97,14 +97,10 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -193,14 +189,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
 -17045922556
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -217,66 +209,29 @@ STAGE PLANS:
                 TableScan
                   alias: over1korc
                   Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Select Operator
                     expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                     Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

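The two deleted PLAN VECTORIZATION blocks above differ (enabled: false, then enabled: true), so the .q file presumably runs the same query with the master switch off and then on; roughly:

    SET hive.vectorized.execution.enabled=false;
    EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
    SET hive.vectorized.execution.enabled=true;
    EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
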
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index 919e290..cfdfce1 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -23,7 +23,7 @@ POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchem
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
     FROM decimal_vgby
@@ -33,17 +33,13 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
     FROM decimal_vgby
     GROUP BY cint
     HAVING COUNT(*) > 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -60,26 +56,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_vgby
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3, 1, 2]
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 1) -> bigint, VectorUDAFMaxDecimal(col 1) -> decimal(20,10), VectorUDAFMinDecimal(col 1) -> decimal(20,10), VectorUDAFSumDecimal(col 1) -> decimal(38,18), VectorUDAFCount(col 2) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(23,14), VectorUDAFMinDecimal(col 2) -> decimal(23,14), VectorUDAFSumDecimal(col 2) -> decimal(38,18), VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 3
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
@@ -88,65 +70,27 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), sum(VALUE._col3), count(VALUE._col4), max(VALUE._col5), min(VALUE._col6), sum(VALUE._col7), count(VALUE._col8)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(20,10), VectorUDAFMinDecimal(col 3) -> decimal(20,10), VectorUDAFSumDecimal(col 4) -> decimal(38,18), VectorUDAFCountMerge(col 5) -> bigint, VectorUDAFMaxDecimal(col 6) -> decimal(23,14), VectorUDAFMinDecimal(col 7) -> decimal(23,14), VectorUDAFSumDecimal(col 8) -> decimal(38,18), VectorUDAFCountMerge(col 9) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                 Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  Filter Vectorization:
-                      className: VectorFilterOperator
-                      native: true
-                      predicateExpression: FilterLongColGreaterLongScalar(col 9, val 1) -> boolean
                   predicate: (_col9 > 1) (type: boolean)
                   Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                     Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2048 Data size: 360843 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -186,7 +130,7 @@ POSTHOOK: Input: default@decimal_vgby
 762	2	5831542.2692483780	1531.2194054054	5833073.4886537834	2	6984454.21109769200000	1833.94569230769250	6986288.15678999969250
 NULL	3072	9318.4351351351	-4298.1513513514	5018444.1081079808	3072	11160.71538461538500	-5147.90769230769300	6010604.30769230735360
 PREHOOK: query: -- Now add the others...
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
     FROM decimal_vgby
@@ -194,17 +138,13 @@ EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
     HAVING COUNT(*) > 1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Now add the others...
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
     FROM decimal_vgby
     GROUP BY cint
     HAVING COUNT(*) > 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -221,27 +161,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_vgby
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                     outputColumnNames: cint, cdecimal1, cdecimal2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3, 1, 2]
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 1) -> bigint, VectorUDAFMaxDecimal(col 1) -> decimal(20,10), VectorUDAFMinDecimal(col 1) -> decimal(20,10), VectorUDAFSumDecimal(col 1) -> decimal(38,18), VectorUDAFAvgDecimal(col 1) -> struct<count:bigint,sum:decimal(30,10)>, VectorUDAFStdPopDecimal(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampDecimal(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCount(col 2) -> bigint, VectorUDAFMaxDecimal(col 2) -> decimal(23,14), VectorUDAFMinDecimal(col 2) -> decimal(23,14), VectorUDAFSumDecimal(col 2) -> decimal(38,18), VectorUDAFAvgDecimal(col 2) -> struct<count:bigint,sum:decimal(33,14)>, VectorUDAFStdPopDecimal(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampDecimal(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          keyExpressions: col 3
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgDecimal(col 1) -> struct<count:bigint,sum:decimal(30,10)> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopDecimal(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampDecimal(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDecimal(col 2) -> struct<count:bigint,sum:decimal(33,14)> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopDecimal(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampDecimal(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                       keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
@@ -253,20 +178,7 @@ STAGE PLANS:
                         Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: struct<count:bigint,sum:decimal(33,14),input:decimal(23,14)>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: struct<count:bigint,sum:double,variance:double>), _col15 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:decimal(30,10),input:decimal(20,10)> of Column[VALUE._col4] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), sum(VALUE._col3), avg(VALUE._col4), stddev_pop(VALUE._col5), stddev_samp(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9), sum(VALUE._col10), avg(VALUE._col11), stddev_pop(VALUE._col12), stddev_samp(VALUE._col13), count(VALUE._col14)

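The annotations deleted from this file documented a real fallback: COUNT/MAX/MIN/SUM over decimals vectorize end to end, but AVG and STDDEV_POP/STDDEV_SAMP carry struct<count,sum,...> partial aggregates, so the map-side group-by loses vector output and Reducer 2 drops to row mode ("Data type struct<...> ... not supported"). A trimmed sketch of the contrast (the column list is shortened from the test's queries):

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    -- Vectorizes on both the map and reduce sides:
    EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1)
        FROM decimal_vgby GROUP BY cint HAVING COUNT(*) > 1;
    -- Adding AVG/STDDEV* forces the reducer back to row execution:
    EXPLAIN SELECT cint, AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1)
        FROM decimal_vgby GROUP BY cint HAVING COUNT(*) > 1;
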
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
index 11d7609..0493994 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
@@ -72,16 +72,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
 POSTHOOK: Output: default@t2
 POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -97,40 +93,18 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1)(children: CastDecimalToBoolean(col 0) -> 1:long) -> boolean
                     predicate: dec is not null (type: boolean)
                     Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dec (type: decimal(4,0))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
-                        Spark Hash Table Sink Vectorization:
-                            className: VectorSparkHashTableSinkOperator
-                            native: true
                         keys:
                           0 _col0 (type: decimal(6,2))
                           1 _col0 (type: decimal(6,2))
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Local Work:
               Map Reduce Local Work
 
@@ -143,23 +117,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1)(children: CastDecimalToBoolean(col 0) -> 1:long) -> boolean
                     predicate: dec is not null (type: boolean)
                     Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dec (type: decimal(4,2))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -167,35 +130,18 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: decimal(6,2))
                           1 _col0 (type: decimal(6,2))
-                        Map Join Vectorization:
-                            className: VectorMapJoinOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            nativeConditionsNotMet: Supports Key Types IS false
-                            nativeNotSupportedKeyTypes: DECIMAL
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 2
                         Statistics: Num rows: 1153 Data size: 129236 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1153 Data size: 129236 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Local Work:
               Map Reduce Local Work
 

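Here the deleted Map Join Vectorization block recorded a partial fallback: the join still vectorized, but through the generic VectorMapJoinOperator (native: false) because decimal join keys are not natively supported ("nativeNotSupportedKeyTypes: DECIMAL"). The settings involved, both named in the deleted condition lines:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.mapjoin.native.enabled=true;
    EXPLAIN
    select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
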
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
index af9ec87..803a53b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select distinct s, t from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select distinct s, t from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -131,24 +127,11 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: t (type: tinyint), s (type: string)
                     outputColumnNames: t, s
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 8
-                          native: false
-                          projectedOutputColumns: []
                       keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -157,38 +140,12 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -196,16 +153,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: string), _col0 (type: tinyint)
                   outputColumnNames: _col0, _col1
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0]
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
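
The hunks above capture the whole shape of this revert: the test query goes from EXPLAIN VECTORIZATION EXPRESSION back to plain EXPLAIN, and every Vectorization sub-block (TableScan, Select, Group By, Reduce Sink, Map, Reduce, File Sink) drops out of the golden output while the operator tree itself is unchanged. A minimal sketch of the command pair being diffed, assuming a Hive session with the flag named in the removed enabledConditionsMet line:

-- sketch: the annotated form on the minus side of this diff
SET hive.vectorized.execution.enabled=true;
EXPLAIN VECTORIZATION EXPRESSION
select distinct s, t from vectortab2korc;

-- sketch: the plain form the golden file reverts to
EXPLAIN
select distinct s, t from vectortab2korc;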

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_elt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_elt.q.out b/ql/src/test/results/clientpositive/spark/vector_elt.q.out
index b49462a..bb66867 100644
--- a/ql/src/test/results/clientpositive/spark/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_elt.q.out
@@ -1,79 +1,29 @@
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
+PREHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
+POSTHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 0) -> boolean
-                    predicate: (ctinyint > 0) (type: boolean)
-                    Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [13, 6, 2, 16]
-                          selectExpressions: LongColAddLongScalar(col 12, val 1)(children: LongColModuloLongScalar(col 0, val 2)(children: col 0) -> 12:long) -> 13:long, VectorElt(columns [14, 6, 15])(children: LongColAddLongScalar(col 12, val 1)(children: LongColModuloLongScalar(col 0, val 2)(children: col 0) -> 12:long) -> 14:long, col 6, CastLongToString(col 2) -> 15:String) -> 16:string
-                      Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
-                      Limit
-                        Number of rows: 10
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                        Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-                          Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: (ctinyint > 0) (type: boolean)
+            Select Operator
+              expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
@@ -97,7 +47,7 @@ POSTHOOK: Input: default@alltypesorc
 1	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
 2	cvLH6Eat2yFsyy7p	528534767	528534767
 1	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
        elt('1', 'abc', 'defg'),
@@ -110,7 +60,7 @@ SELECT elt(2, 'abc', 'defg'),
        elt(3, 'abc', 'defg')
 FROM alltypesorc LIMIT 1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
        elt('1', 'abc', 'defg'),
@@ -123,67 +73,22 @@ SELECT elt(2, 'abc', 'defg'),
        elt(3, 'abc', 'defg')
 FROM alltypesorc LIMIT 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Select Operator
-                    expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                        selectExpressions: ConstantVectorExpression(val defg) -> 12:string, ConstantVectorExpression(val cc) -> 13:string, ConstantVectorExpression(val abc) -> 14:string, ConstantVectorExpression(val 2) -> 15:string, ConstantVectorExpression(val 12345) -> 16:string, ConstantVectorExpression(val 123456789012) -> 17:string, ConstantVectorExpression(val 1.25) -> 18:string, ConstantVectorExpression(val 16.0) -> 19:string, ConstantVectorExpression(val null) -> 20:string, ConstantVectorExpression(val null) -> 21:string
-                    Statistics: Num rows: 12288 Data size: 8687784 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 1
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+            Limit
+              Number of rows: 1
+              ListSink
 
 PREHOOK: query: SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
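
For reference, elt(n, v1, v2, ...) returns its n-th value argument, which is why the removed plan constant-folds the whole select list: elt(2, 'abc', 'defg') folds to 'defg', elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg') to 'cc', and an out-of-range index such as elt(3, 'abc', 'defg') to null. A short sketch of that behavior, using only the calls shown in this golden output:

-- values match the ConstantVectorExpression list in the removed Select Vectorization block
SELECT elt(2, 'abc', 'defg');                             -- 'defg'
SELECT elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg');  -- 'cc'
SELECT elt(3, 'abc', 'defg');                             -- NULL (index past the last argument)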

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
index a99dfd8..e13c311 100644
--- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s, t, max(b) from vectortab2korc group by s, t
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s, t, max(b) from vectortab2korc group by s, t
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -131,26 +127,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: t (type: tinyint), s (type: string), b (type: bigint)
                     outputColumnNames: t, s, b
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 3]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(b)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMaxLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 8
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -159,41 +141,14 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMaxLong(col 2) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -201,16 +156,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0, 2]
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
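
The vector_groupby_3 diff follows the same template as vector_distinct_2: the two-stage aggregation survives untouched (a map-side Group By in hash mode over keys t, s feeding a reducer-side Group By in mergepartial mode), and only the vectorization annotations are stripped. A sketch of the reverted statement, assuming the same vectortab2korc table:

EXPLAIN
select s, t, max(b) from vectortab2korc group by s, t;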


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
index 423fdbf..996b893 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
@@ -120,7 +120,7 @@ POSTHOOK: Lineage: part_orc.p_size SIMPLE [(part_staging)part_staging.FieldSchem
 POSTHOOK: Lineage: part_orc.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ]
 PREHOOK: query: --1. test1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -132,7 +132,7 @@ from noop(on part_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: --1. test1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -142,10 +142,6 @@ from noop(on part_orc
   order by p_name
   )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -176,14 +172,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -236,11 +224,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -272,11 +255,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -399,7 +377,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
 PREHOOK: query: -- 2. testJoinWithNoop
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
 from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
@@ -408,17 +386,13 @@ sort by j.p_name)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 2. testJoinWithNoop
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
 from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -454,14 +428,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -531,14 +497,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -613,11 +571,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -649,11 +602,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -759,7 +707,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	44
 Manufacturer#5	almond azure blanched chiffon midnight	23	-23
 PREHOOK: query: -- 3. testOnlyPTF
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size
 from noop(on part_orc
 partition by p_mfgr
@@ -767,16 +715,12 @@ order by p_name)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 3. testOnlyPTF
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size
 from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -806,14 +750,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -866,11 +802,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -965,7 +896,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46
 Manufacturer#5	almond azure blanched chiffon midnight	23
 PREHOOK: query: -- 4. testPTFAlias
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -977,7 +908,7 @@ from noop(on part_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 4. testPTFAlias
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -987,10 +918,6 @@ from noop(on part_orc
   order by p_name
   ) abc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1021,14 +948,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -1081,11 +1000,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -1117,11 +1031,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -1244,7 +1153,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
 PREHOOK: query: -- 5. testPTFAndWhereWithWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -1256,7 +1165,7 @@ from noop(on part_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 5. testPTFAndWhereWithWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -1266,10 +1175,6 @@ from noop(on part_orc
           order by p_name 
           )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1300,14 +1205,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -1360,11 +1257,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -1396,11 +1288,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -1524,7 +1411,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	46	44
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	23	-23
 PREHOOK: query: -- 6. testSWQAndPTFAndGBy
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -1537,7 +1424,7 @@ group by p_mfgr, p_name, p_size
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 6. testSWQAndPTFAndGBy
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -1548,10 +1435,6 @@ from noop(on part_orc
           ) 
 group by p_mfgr, p_name, p_size
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1582,14 +1465,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -1642,11 +1517,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -1686,11 +1556,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
@@ -1816,7 +1681,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	46	44
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	23	-23
 PREHOOK: query: -- 7. testJoin
 
-explain vectorization extended
+explain extended
 select abc.* 
 from noop(on part_orc 
 partition by p_mfgr 
@@ -1825,17 +1690,13 @@ order by p_name
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 7. testJoin
 
-explain vectorization extended
+explain extended
 select abc.* 
 from noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
 ) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1866,14 +1727,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -1943,14 +1796,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -2003,11 +1848,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: double), VALUE._col6 (type: string)
@@ -2128,7 +1968,7 @@ POSTHOOK: Input: default@part_orc
 90681	almond antique chartreuse khaki white	Manufacturer#3	Brand#31	MEDIUM BURNISHED TIN	17	SM CASE	1671.68	are slyly after the sl
 PREHOOK: query: -- 8. testJoinRight
 
-explain vectorization extended
+explain extended
 select abc.* 
 from part_orc p1 join noop(on part_orc 
 partition by p_mfgr 
@@ -2137,17 +1977,13 @@ order by p_name
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 8. testJoinRight
 
-explain vectorization extended
+explain extended
 select abc.* 
 from part_orc p1 join noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
 ) abc on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2181,14 +2017,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -2255,14 +2083,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -2354,11 +2174,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: double), VALUE._col6 (type: string)
@@ -2444,7 +2259,7 @@ POSTHOOK: Input: default@part_orc
 90681	almond antique chartreuse khaki white	Manufacturer#3	Brand#31	MEDIUM BURNISHED TIN	17	SM CASE	1671.68	are slyly after the sl
 PREHOOK: query: -- 9. testNoopWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
 from noopwithmap(on part_orc
@@ -2453,17 +2268,13 @@ order by p_name, p_size desc)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 9. testNoopWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
 from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2509,12 +2320,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -2567,11 +2372,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
@@ -2603,11 +2403,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
@@ -2711,7 +2506,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4
 Manufacturer#5	almond azure blanched chiffon midnight	23	5
 PREHOOK: query: -- 10. testNoopWithMapWithWindowing 
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -2722,7 +2517,7 @@ from noopwithmap(on part_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 10. testNoopWithMapWithWindowing 
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -2731,10 +2526,6 @@ from noopwithmap(on part_orc
   partition by p_mfgr
   order by p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2781,12 +2572,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -2839,11 +2624,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -2876,11 +2656,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3001,7 +2776,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
 PREHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -3012,7 +2787,7 @@ order by p_name)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -3021,10 +2796,6 @@ from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3055,14 +2826,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -3115,11 +2878,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3151,11 +2909,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3276,7 +3029,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
 PREHOOK: query: -- 12. testFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -3288,7 +3041,7 @@ order by p_mfgr, p_name
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 12. testFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -3298,10 +3051,6 @@ partition by p_mfgr
 order by p_mfgr, p_name
 )))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3333,14 +3082,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -3393,11 +3134,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3452,11 +3188,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3496,11 +3227,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3623,7 +3349,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	4	4	6208.18
 Manufacturer#5	almond azure blanched chiffon midnight	23	5	5	7672.66
 PREHOOK: query: -- 13. testPTFAndWindowingInSubQ
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, 
 sub1.cd, sub1.s1 
 from (select p_mfgr, p_name, 
@@ -3638,7 +3364,7 @@ window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 13. testPTFAndWindowingInSubQ
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, 
 sub1.cd, sub1.s1 
 from (select p_mfgr, p_name, 
@@ -3651,10 +3377,6 @@ order by p_name)
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) 
 ) sub1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3685,14 +3407,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -3745,11 +3459,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3781,11 +3490,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -3906,7 +3610,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	4	5882.970000000001
 Manufacturer#5	almond azure blanched chiffon midnight	5	4271.3099999999995
 PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
 
-explain vectorization extended
+explain extended
 select abc.p_mfgr, abc.p_name, 
 rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
 dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
@@ -3920,7 +3624,7 @@ order by p_name
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
 
-explain vectorization extended
+explain extended
 select abc.p_mfgr, abc.p_name, 
 rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
 dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
@@ -3932,10 +3636,6 @@ partition by p_mfgr
 order by p_name 
 ) abc join part_orc p1 on abc.p_partkey = p1.p_partkey
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3967,14 +3667,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -4044,14 +3736,6 @@ STAGE PLANS:
                       auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -4104,11 +3788,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -4166,11 +3845,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -4312,7 +3986,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	4	1018.1	6208.18	46
 Manufacturer#5	almond azure blanched chiffon midnight	5	5	5	1464.48	7672.66	23	-23
 PREHOOK: query: -- 15. testDistinctInSelectWithPTF
 
-explain vectorization extended
+explain extended
 select DISTINCT p_mfgr, p_name, p_size 
 from noop(on part_orc 
 partition by p_mfgr 
@@ -4320,16 +3994,12 @@ order by p_name)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 15. testDistinctInSelectWithPTF
 
-explain vectorization extended
+explain extended
 select DISTINCT p_mfgr, p_name, p_size 
 from noop(on part_orc 
 partition by p_mfgr 
 order by p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4360,14 +4030,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -4420,11 +4082,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -4464,13 +4121,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: vectorized, llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
@@ -4565,7 +4215,7 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@part_orc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mfgr_price_view
-PREHOOK: query: explain vectorization extended
+PREHOOK: query: explain extended
 select p_mfgr, p_brand, s, 
 sum(s) over w1  as s1
 from noop(on mfgr_price_view 
@@ -4573,7 +4223,7 @@ partition by p_mfgr
 order by p_mfgr)  
 window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization extended
+POSTHOOK: query: explain extended
 select p_mfgr, p_brand, s, 
 sum(s) over w1  as s1
 from noop(on mfgr_price_view 
@@ -4581,10 +4231,6 @@ partition by p_mfgr
 order by p_mfgr)  
 window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4625,14 +4271,6 @@ STAGE PLANS:
                         auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -4685,11 +4323,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
@@ -4723,11 +4356,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: double)
@@ -4868,7 +4496,7 @@ fv1 INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@part_5
-PREHOOK: query: explain vectorization extended
+PREHOOK: query: explain extended
 from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
@@ -4884,7 +4512,7 @@ cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud,
 first_value(p_size, true) over w1  as fv1
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization extended
+POSTHOOK: query: explain extended
 from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
@@ -4900,10 +4528,6 @@ cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud,
 first_value(p_size, true) over w1  as fv1
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-3 depends on stages: Stage-2
@@ -4940,14 +4564,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -5000,11 +4616,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -5045,11 +4656,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
@@ -5127,11 +4733,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -5173,11 +4774,6 @@ STAGE PLANS:
         Reducer 5 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -5445,7 +5041,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	46	46	4	4	0.8	6
 Manufacturer#5	almond azure blanched chiffon midnight	23	23	5	5	1.0	2
 PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -5464,7 +5060,7 @@ from noop(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -5481,10 +5077,6 @@ from noop(on
         partition by p_mfgr,p_name  
         order by p_mfgr,p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -5516,14 +5108,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -5576,11 +5160,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -5642,11 +5221,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -5686,11 +5260,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -5827,7 +5396,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	1	1	46	46
 Manufacturer#5	almond azure blanched chiffon midnight	1	1	23	23
 PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -5846,7 +5415,7 @@ from noop(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 19. testMulti3OperatorsFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -5863,10 +5432,6 @@ from noop(on
         partition by p_mfgr  
         order by p_mfgr )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -5899,14 +5464,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -5959,11 +5516,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -6002,11 +5554,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6038,11 +5585,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -6074,11 +5616,6 @@ STAGE PLANS:
         Reducer 5 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6215,7 +5752,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	46	85
 Manufacturer#5	almond azure blanched chiffon midnight	5	5	23	108
 PREHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -6232,7 +5769,7 @@ from noop(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -6247,10 +5784,6 @@ from noop(on
           partition by p_mfgr 
           order by p_mfgr))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -6282,14 +5815,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -6342,11 +5867,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6385,11 +5905,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -6428,11 +5943,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6565,7 +6075,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	4	4	46	85
 Manufacturer#5	almond azure blanched chiffon midnight	5	5	23	108
 PREHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -6584,7 +6094,7 @@ from noopwithmap(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -6601,10 +6111,6 @@ from noopwithmap(on
           partition by p_mfgr,p_name 
           order by p_mfgr,p_name)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -6637,14 +6143,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -6697,11 +6195,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6740,11 +6233,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -6792,11 +6280,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6829,11 +6312,6 @@ STAGE PLANS:
         Reducer 5 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -6970,7 +6448,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	1	1	46	46
 Manufacturer#5	almond azure blanched chiffon midnight	1	1	23	23
 PREHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, 
@@ -6988,7 +6466,7 @@ from noop(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, 
@@ -7004,10 +6482,6 @@ from noop(on
           order by p_mfgr
           ))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -7039,14 +6513,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -7099,11 +6565,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -7158,11 +6619,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
@@ -7202,11 +6658,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -7341,7 +6792,7 @@ Manufacturer#5	almond aquamarine dodger light gainsboro	1	1	46	46	46
 Manufacturer#5	almond azure blanched chiffon midnight	1	1	23	23	23
 PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -7357,7 +6808,7 @@ from noopwithmap(on
 PREHOOK: type: QUERY
 POSTHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -7371,10 +6822,6 @@ from noopwithmap(on
               order by p_mfgr, p_name) 
           ))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -7406,14 +6853,6 @@ STAGE PLANS:
                     auto parallelism: true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -7466,11 +6905,6 @@ STAGE PLANS:
         Reducer 2 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -7525,11 +6959,6 @@ STAGE PLANS:
         Reducer 3 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
@@ -7562,11 +6991,6 @@ STAGE PLANS:
         Reducer 4 
             Execution mode: llap
             Needs Tagging: false
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF Operator (PTF) not supported
-                vectorized: false
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)

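A note on the blocks removed above: every deleted "Reduce Vectorization:"
section in these PTF golden files carries the same reason, "PTF Operator
(PTF) not supported". In other words, with HIVE-11394 applied, reducers that
host a PTF or windowing operator fall back to row mode even when reduce-side
vectorization is switched on; only the map-side ORC scans stay vectorized.
A minimal sketch of how that surfaces, using the part_orc table from these
tests (the session settings are assumed here, not part of the original .q
files):

SET hive.vectorized.execution.enabled=true;
SET hive.vectorized.execution.reduce.enabled=true;

-- With HIVE-11394 applied, the reducer running the windowing PTF is
-- expected to report:
--   notVectorizedReason: PTF Operator (PTF) not supported
EXPLAIN VECTORIZATION EXPRESSION
SELECT p_mfgr, p_name,
       rank() OVER (PARTITION BY p_mfgr ORDER BY p_name) AS r
FROM part_orc;
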
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
index 8cdb67d..e376ca1 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
@@ -1,19 +1,15 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint order by CNT
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint order by CNT
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -33,85 +29,39 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: cint is not null (type: boolean)
                     Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
                       Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: cint is not null (type: boolean)
                     Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
                       Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -138,11 +88,6 @@ STAGE PLANS:
                       value expressions: _col0 (type: bigint), _col1 (type: int), _col2 (type: int), _col3 (type: struct<count:bigint,sum:double,input:int>)
         Reducer 3 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col3] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), avg(VALUE._col3)
@@ -156,27 +101,13 @@ STAGE PLANS:
                   value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: double)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

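The one reducer above that stays in row mode is instructive: avg's partial
aggregation state travels between reducers as
struct<count:bigint,sum:double,input:int>, and the vectorized GROUP BY in
this build cannot consume a struct-typed column, so only the merge step
falls back while the count/max/min work stays vectorized. A sketch
reproducing that plan shape (query taken from the test above; session
settings assumed):

SET hive.vectorized.execution.enabled=true;
SET hive.vectorized.execution.reduce.enabled=true;

-- The reducer merging avg's struct-typed partials is expected to report
-- the "Data type struct<count:bigint,sum:double,input:int> ... not
-- supported" reason and run in row mode.
EXPLAIN VECTORIZATION EXPRESSION
SELECT COUNT(t1.cint) AS cnt, MAX(t2.cint), MIN(t1.cint),
       AVG(t1.cint + t2.cint)
FROM alltypesorc t1
JOIN alltypesorc t2 ON t1.cint = t2.cint
ORDER BY cnt;
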
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
index 84fdc16..bfac939 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
    substr(cstring1, 1, 2)
   ,substr(cstring1, 2)
@@ -24,7 +24,7 @@ and cstring1 like '%'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
    substr(cstring1, 1, 2)
   ,substr(cstring1, 2)
@@ -46,54 +46,22 @@ where cbigint % 237 = 0
 and length(substr(cstring1, 1, 2)) <= 2
 and cstring1 like '%'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 1816546 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2) and (cstring1 like '%')) (type: boolean)
-                    Statistics: Num rows: 1024 Data size: 151470 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                      Statistics: Num rows: 1024 Data size: 2265088 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 1024 Data size: 2265088 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: (((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2) and (cstring1 like '%')) (type: boolean)
+            Select Operator
+              expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+              ListSink
 
 PREHOOK: query: select 
    substr(cstring1, 1, 2)


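The before/after plans in vectorized_string_funcs.q.out differ by more than
the dropped vectorization summary: the new baseline has no Tez stage at all,
because under default fetch-task conversion a single-table
filter-plus-select is answered by a fetch task (a TableScan feeding a
ListSink), and fetch tasks are never vectorized. The vectorization tests pin
the knob to keep a real task around. A sketch of the two settings (values as
documented for hive.fetch.task.conversion; the default varies by branch):

SET hive.fetch.task.conversion=none;   -- always plan a Tez task, so the
                                       -- scan can be vectorized
SET hive.fetch.task.conversion=more;   -- simple selects collapse to a
                                       -- fetch task with a ListSink

EXPLAIN
SELECT substr(cstring1, 1, 2), lower(cstring1)
FROM alltypesorc
WHERE (cbigint % 237) = 0;
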
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join6.q b/ql/src/test/queries/clientpositive/vector_outer_join6.q
index b39e8ed..06fa385 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join6.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join6.q
@@ -3,7 +3,6 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -29,14 +28,14 @@ create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt;
 create table TJOIN3 stored as orc AS SELECT * FROM TJOIN3_txt;
 create table TJOIN4 stored as orc AS SELECT * FROM TJOIN4_txt;
 
-explain vectorization detail formatted
+explain
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1;
 
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1;
 
-explain vectorization detail formatted
+explain
 select tj1rnum, tj2rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1;
 

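Besides dropping the fetch-conversion pin, this file shows the richest form
of the reverted syntax, "explain vectorization detail formatted": DETAIL
adds per-operator vectorization attributes, and FORMATTED renders the plan
as JSON (Hive's existing EXPLAIN FORMATTED behavior). A sketch using the
tjoin tables defined above, with the settings repeated from the .q header:

SET hive.vectorized.execution.enabled=true;
SET hive.vectorized.execution.mapjoin.native.enabled=true;
SET hive.auto.convert.join=true;

-- With HIVE-11394 applied this prints a JSON plan annotated with
-- vectorization detail for each operator.
EXPLAIN VECTORIZATION DETAIL FORMATTED
SELECT tj1rnum, tj2rnum, tjoin3.rnum AS rnumt3
FROM (SELECT tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1
      FROM tjoin1 LEFT OUTER JOIN tjoin2 ON tjoin1.c1 = tjoin2.c1) tj
LEFT OUTER JOIN tjoin3 ON tj2c1 = tjoin3.c1;
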
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q b/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q
index b825fb3..f25374d 100644
--- a/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q
+++ b/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 create table inventory_txt
 (
@@ -27,7 +27,7 @@ partitioned by (par string) stored as orc;
 insert into table inventory_part_0 partition(par='1') select * from inventory_txt;
 insert into table inventory_part_0 partition(par='2') select * from inventory_txt;
 
-explain vectorization expression
+explain
 select sum(inv_quantity_on_hand) from inventory_part_0;
 
 select sum(inv_quantity_on_hand) from inventory_part_0;
@@ -47,7 +47,7 @@ alter table inventory_part_1 add columns (fifthcol string);
 
 insert into table inventory_part_1 partition(par='5cols') select *, '5th' as fifthcol from inventory_txt;
 
-explain vectorization expression
+explain
 select sum(inv_quantity_on_hand) from inventory_part_1;
 
 select sum(inv_quantity_on_hand) from inventory_part_1;
@@ -66,7 +66,7 @@ insert into table inventory_part_2a partition(par='1') select * from inventory_t
 insert into table inventory_part_2a partition(par='2') select * from inventory_txt;
 alter table inventory_part_2a partition (par='2') change inv_item_sk other_name int;
 
-explain vectorization expression
+explain
 select sum(inv_quantity_on_hand) from inventory_part_2a;
 
 create table inventory_part_2b(
@@ -80,7 +80,7 @@ insert into table inventory_part_2b partition(par1='1',par2=4) select * from inv
 insert into table inventory_part_2b partition(par1='2',par2=3) select * from inventory_txt;
 alter table inventory_part_2b partition (par1='2',par2=3) change inv_quantity_on_hand other_name int;
 
-explain vectorization expression
+explain
 select sum(inv_quantity_on_hand) from inventory_part_2b;
 
 -- Verify we do not vectorize when a partition column type is different.
@@ -97,5 +97,5 @@ insert into table inventory_part_3 partition(par='1') select * from inventory_tx
 insert into table inventory_part_3 partition(par='2') select * from inventory_txt;
 alter table inventory_part_3 partition (par='2') change inv_warehouse_sk inv_warehouse_sk bigint;
 
-explain vectorization expression
+explain
 select sum(inv_quantity_on_hand) from inventory_part_3;
\ No newline at end of file

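The inventory_part_* cases above all probe schema evolution across
partitions: once one partition's column type diverges (for example
inv_warehouse_sk promoted to bigint in a single partition), the reader can
no longer assume one column layout for the whole scan, and the test asserts
the query is not vectorized. A condensed sketch of the pattern (DDL
abbreviated from the .q file above):

CREATE TABLE inventory_part_3 (
  inv_date_sk INT, inv_item_sk INT,
  inv_warehouse_sk INT, inv_quantity_on_hand INT)
PARTITIONED BY (par STRING) STORED AS ORC;

ALTER TABLE inventory_part_3 PARTITION (par='2')
  CHANGE inv_warehouse_sk inv_warehouse_sk BIGINT;

-- Expected: the plan omits "Execution mode: vectorized" for this scan.
EXPLAIN
SELECT SUM(inv_quantity_on_hand) FROM inventory_part_3;
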
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q
index ee22c01..f53d8c0 100644
--- a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q
+++ b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
 -- EXCLUDE_OS_WINDOWS
@@ -32,12 +32,12 @@ select fl_date, count(*) from flights_tiny_orc group by fl_date;
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25;
 
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25;
 
-explain vectorization expression
+explain
 select fl_date, count(*) from flights_tiny_orc group by fl_date;
 
 select fl_date, count(*) from flights_tiny_orc group by fl_date;
@@ -71,17 +71,17 @@ select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_date;
 
 select * from flights_tiny_orc_partitioned_date;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25;
 
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25;
 
-explain vectorization expression
+explain
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date;
 
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date;
@@ -115,17 +115,17 @@ select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_timestamp;
 
 select * from flights_tiny_orc_partitioned_timestamp;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;
 
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;
 
-explain vectorization expression
+explain
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time;
 
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_partitioned_date_time_win.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_partitioned_date_time_win.q b/ql/src/test/queries/clientpositive/vector_partitioned_date_time_win.q
index bd4931c..c157df1 100644
--- a/ql/src/test/queries/clientpositive/vector_partitioned_date_time_win.q
+++ b/ql/src/test/queries/clientpositive/vector_partitioned_date_time_win.q
@@ -30,12 +30,12 @@ select fl_date, count(*) from flights_tiny_orc group by fl_date;
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25;
 
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25;
 
-explain vectorization expression
+explain
 select fl_date, count(*) from flights_tiny_orc group by fl_date;
 
 select fl_date, count(*) from flights_tiny_orc group by fl_date;
@@ -69,17 +69,17 @@ select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_date;
 
 select * from flights_tiny_orc_partitioned_date;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25;
 
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25;
 
-explain vectorization expression
+explain
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date;
 
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date;
@@ -113,17 +113,17 @@ select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl
 
 SET hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_timestamp;
 
 select * from flights_tiny_orc_partitioned_timestamp;
 
-explain vectorization expression
+explain
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;
 
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;
 
-explain vectorization expression
+explain
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time;
 
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_reduce1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_reduce1.q b/ql/src/test/queries/clientpositive/vector_reduce1.q
index ce90491..cfd803f 100644
--- a/ql/src/test/queries/clientpositive/vector_reduce1.q
+++ b/ql/src/test/queries/clientpositive/vector_reduce1.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.vectorized.execution.reducesink.new.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -43,7 +42,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select b from vectortab2korc order by b;
 
 select b from vectortab2korc order by b;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_reduce2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_reduce2.q b/ql/src/test/queries/clientpositive/vector_reduce2.q
index 80ad196..ab67132 100644
--- a/ql/src/test/queries/clientpositive/vector_reduce2.q
+++ b/ql/src/test/queries/clientpositive/vector_reduce2.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.vectorized.execution.reducesink.new.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -43,7 +42,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select s, i, s2 from vectortab2korc order by s, i, s2;
 
 select s, i, s2 from vectortab2korc order by s, i, s2;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_reduce3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_reduce3.q b/ql/src/test/queries/clientpositive/vector_reduce3.q
index e01ed26..bf8206f 100644
--- a/ql/src/test/queries/clientpositive/vector_reduce3.q
+++ b/ql/src/test/queries/clientpositive/vector_reduce3.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.vectorized.execution.reducesink.new.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -43,7 +42,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select s from vectortab2korc order by s;
 
 select s from vectortab2korc order by s;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q
index bbd25ae..4a50150 100644
--- a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q
+++ b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q
@@ -1,12 +1,10 @@
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
 WHERE cint is not null and cdouble is not null;
 
 SET hive.vectorized.execution.enabled=true;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
 WHERE cdecimal1 is not null and cdecimal2 is not null
 GROUP BY cint, cdouble, cdecimal1, cdecimal2

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_string_concat.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_string_concat.q b/ql/src/test/queries/clientpositive/vector_string_concat.q
index b03c2a4..f3a5965 100644
--- a/ql/src/test/queries/clientpositive/vector_string_concat.q
+++ b/ql/src/test/queries/clientpositive/vector_string_concat.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 DROP TABLE over1k;
 DROP TABLE over1korc;
@@ -38,7 +37,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE over1korc SELECT * FROM over1k;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
        CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20;
@@ -87,7 +86,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_string_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_string_decimal.q b/ql/src/test/queries/clientpositive/vector_string_decimal.q
index 186e339..e69cd77 100644
--- a/ql/src/test/queries/clientpositive/vector_string_decimal.q
+++ b/ql/src/test/queries/clientpositive/vector_string_decimal.q
@@ -13,7 +13,7 @@ insert overwrite table orc_decimal select id from staging;
 
 set hive.vectorized.execution.enabled=true;
 
-explain vectorization expression
+explain
 select * from orc_decimal where id in ('100000000', '200000000');
 select * from orc_decimal where id in ('100000000', '200000000');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_struct_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_struct_in.q b/ql/src/test/queries/clientpositive/vector_struct_in.q
index 207be37..50487db 100644
--- a/ql/src/test/queries/clientpositive/vector_struct_in.q
+++ b/ql/src/test/queries/clientpositive/vector_struct_in.q
@@ -1,9 +1,8 @@
 set hive.cbo.enable=false;
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.tez.dynamic.partition.pruning=false;
 set hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -12,7 +11,7 @@ create table test_1 (`id` string, `lineid` string) stored as orc;
 
 insert into table test_1 values ('one','1'), ('seven','1');
 
-explain vectorization expression
+explain
 select * from test_1 where struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -39,7 +38,7 @@ struct('nine','1'),
 struct('ten','1')
 );
 
-explain vectorization expression
+explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -72,7 +71,7 @@ create table test_2 (`id` int, `lineid` int) stored as orc;
 
 insert into table test_2 values (1,1), (7,1);
 
-explain vectorization expression
+explain
 select * from test_2 where struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -99,7 +98,7 @@ struct(9,1),
 struct(10,1)
 );
 
-explain vectorization expression
+explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -131,7 +130,7 @@ create table test_3 (`id` string, `lineid` int) stored as orc;
 
 insert into table test_3 values ('one',1), ('seven',1);
 
-explain vectorization expression
+explain
 select * from test_3 where struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -158,7 +157,7 @@ struct('nine',1),
 struct('ten',1)
 );
 
-explain vectorization expression
+explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -190,7 +189,7 @@ create table test_4 (`my_bigint` bigint, `my_string` string, `my_double` double)
 
 insert into table test_4 values (1, "b", 1.5), (1, "a", 0.5), (2, "b", 1.5);
 
-explain vectorization expression
+explain
 select * from test_4 where struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
@@ -219,7 +218,7 @@ struct(1L, "a", 0.5D),
 struct(3L, "b", 1.5D)
 );
 
-explain vectorization expression
+explain
 select `my_bigint`, `my_string`, `my_double`, struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
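(Editorial note, not part of the patch: struct(...) IN (struct(...), ...) evaluates to a boolean, which is why this file tests it both as a WHERE predicate and as a projected column. A sketch against the test_1 table created above:

select `id`, `lineid`,
       struct(`id`, `lineid`) IN (struct('one','1'), struct('two','3')) as matched
from test_1;  -- matched is true for the ('one','1') row, false otherwise)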

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_tablesample_rows.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_tablesample_rows.q b/ql/src/test/queries/clientpositive/vector_tablesample_rows.q
index 94b2f5b..4deb1c8 100644
--- a/ql/src/test/queries/clientpositive/vector_tablesample_rows.q
+++ b/ql/src/test/queries/clientpositive/vector_tablesample_rows.q
@@ -4,7 +4,7 @@ SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 set hive.mapred.mode=nonstrict;
 
-explain vectorization expression
+explain
 select 'key1', 'value1' from alltypesorc tablesample (1 rows);
 
 select 'key1', 'value1' from alltypesorc tablesample (1 rows);
@@ -12,7 +12,7 @@ select 'key1', 'value1' from alltypesorc tablesample (1 rows);
 
 create table decimal_2 (t decimal(18,9)) stored as orc;
 
-explain vectorization expression
+explain
 insert overwrite table decimal_2
   select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows);
 
@@ -25,12 +25,12 @@ drop table decimal_2;
 
 
 -- Dummy tables HIVE-13190
-explain vectorization expression
+explain
 select count(1) from (select * from (Select 1 a) x order by x.a) y;
 
 select count(1) from (select * from (Select 1 a) x order by x.a) y;
 
-explain vectorization expression
+explain
 create temporary table dual as select 1;
 
 create temporary table dual as select 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_udf2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_udf2.q b/ql/src/test/queries/clientpositive/vector_udf2.q
index e62af6a..b926c4f 100644
--- a/ql/src/test/queries/clientpositive/vector_udf2.q
+++ b/ql/src/test/queries/clientpositive/vector_udf2.q
@@ -7,7 +7,7 @@ create table varchar_udf_2 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)
 insert overwrite table varchar_udf_2
   select key, value, key, value from src where key = '238' limit 1;
 
-explain vectorization expression
+explain
 select 
   c1 LIKE '%38%',
   c2 LIKE 'val_%',

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_udf3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_udf3.q b/ql/src/test/queries/clientpositive/vector_udf3.q
index bc3a5e1..8a4df79 100644
--- a/ql/src/test/queries/clientpositive/vector_udf3.q
+++ b/ql/src/test/queries/clientpositive/vector_udf3.q
@@ -1,11 +1,10 @@
 ADD JAR ivy://org.apache.hive.hive-it-custom-udfs:udf-vectorized-badexample:+;
-set hive.fetch.task.conversion=none;
 
 CREATE TEMPORARY FUNCTION rot13 as 'hive.it.custom.udfs.GenericUDFRot13';
 
 set hive.vectorized.execution.enabled=true;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT rot13(cstring1) from alltypesorc;
+EXPLAIN SELECT rot13(cstring1) from alltypesorc;
 
 SELECT cstring1, rot13(cstring1) from alltypesorc order by cstring1 desc limit 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_varchar_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_varchar_4.q b/ql/src/test/queries/clientpositive/vector_varchar_4.q
index 80f84d8..32a74a4 100644
--- a/ql/src/test/queries/clientpositive/vector_varchar_4.q
+++ b/ql/src/test/queries/clientpositive/vector_varchar_4.q
@@ -1,6 +1,5 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 drop table if exists vectortab2k;
 drop table if exists vectortab2korc;
@@ -45,7 +44,7 @@ INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 drop table if exists varchar_lazy_binary_columnar; 
 create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile;
 
-explain vectorization expression
+explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
 
 -- insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q b/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q
index 285d2ac..ac0570e 100644
--- a/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q
+++ b/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q
@@ -37,15 +37,15 @@ create table varchar_join1_vc2_orc stored as orc as select * from varchar_join1_
 create table varchar_join1_str_orc stored as orc as select * from varchar_join1_str;
 
 -- Join varchar with same length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
 select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- Join varchar with different length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
 select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- Join varchar with string
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1;
 select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1;
 
 drop table varchar_join1_vc1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_varchar_simple.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_varchar_simple.q b/ql/src/test/queries/clientpositive/vector_varchar_simple.q
index 6f753a7..acd6598 100644
--- a/ql/src/test/queries/clientpositive/vector_varchar_simple.q
+++ b/ql/src/test/queries/clientpositive/vector_varchar_simple.q
@@ -1,7 +1,5 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 drop table varchar_2;
 
 create table varchar_2 (
@@ -16,7 +14,7 @@ from src
 order by key asc
 limit 5;
 
-explain vectorization select key, value
+explain select key, value
 from varchar_2
 order by key asc
 limit 5;
@@ -32,7 +30,7 @@ from src
 order by key desc
 limit 5;
 
-explain vectorization select key, value
+explain select key, value
 from varchar_2
 order by key desc
 limit 5;
@@ -50,7 +48,7 @@ create table varchar_3 (
   field varchar(25)
 ) stored as orc;
 
-explain vectorization expression
+explain
 insert into table varchar_3 select cint from alltypesorc limit 10;
 
 insert into table varchar_3 select cint from alltypesorc limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_when_case_null.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_when_case_null.q b/ql/src/test/queries/clientpositive/vector_when_case_null.q
index 4acd6dc..a423b60 100644
--- a/ql/src/test/queries/clientpositive/vector_when_case_null.q
+++ b/ql/src/test/queries/clientpositive/vector_when_case_null.q
@@ -8,7 +8,7 @@ set hive.fetch.task.conversion=none;
 create table count_case_groupby (key string, bool boolean) STORED AS orc;
 insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL);
 
-explain vectorization expression
+explain
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
 
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_0.q b/ql/src/test/queries/clientpositive/vectorization_0.q
index 00fb22a..caa6a6a 100644
--- a/ql/src/test/queries/clientpositive/vectorization_0.q
+++ b/ql/src/test/queries/clientpositive/vectorization_0.q
@@ -1,12 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
 -- Use ORDER BY clauses to generate 2 stages.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ctinyint) as c1,
        MAX(ctinyint),
        COUNT(ctinyint),
@@ -21,7 +20,7 @@ SELECT MIN(ctinyint) as c1,
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1;
@@ -30,7 +29,7 @@ SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT
   avg(ctinyint) as c1,
   variance(ctinyint),
@@ -55,7 +54,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(cbigint) as c1,
        MAX(cbigint),
        COUNT(cbigint),
@@ -70,7 +69,7 @@ SELECT MIN(cbigint) as c1,
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1;
@@ -79,7 +78,7 @@ SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT
   avg(cbigint) as c1,
   variance(cbigint),
@@ -104,7 +103,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(cfloat) as c1,
        MAX(cfloat),
        COUNT(cfloat),
@@ -119,7 +118,7 @@ SELECT MIN(cfloat) as c1,
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1;
@@ -128,7 +127,7 @@ SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT
   avg(cfloat) as c1,
   variance(cfloat),
@@ -153,7 +152,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_1.q b/ql/src/test/queries/clientpositive/vectorization_1.q
index f71218f..8fdcb27 100644
--- a/ql/src/test/queries/clientpositive/vectorization_1.q
+++ b/ql/src/test/queries/clientpositive/vectorization_1.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_10.q b/ql/src/test/queries/clientpositive/vectorization_10.q
index c5f4d43..778250a 100644
--- a/ql/src/test/queries/clientpositive/vectorization_10.q
+++ b/ql/src/test/queries/clientpositive/vectorization_10.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_11.q b/ql/src/test/queries/clientpositive/vectorization_11.q
index 3830ea9..4ac42ac 100644
--- a/ql/src/test/queries/clientpositive/vectorization_11.q
+++ b/ql/src/test/queries/clientpositive/vectorization_11.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_12.q b/ql/src/test/queries/clientpositive/vectorization_12.q
index 0728ba9..bc31f3c 100644
--- a/ql/src/test/queries/clientpositive/vectorization_12.q
+++ b/ql/src/test/queries/clientpositive/vectorization_12.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_13.q b/ql/src/test/queries/clientpositive/vectorization_13.q
index 84ae994..005808b 100644
--- a/ql/src/test/queries/clientpositive/vectorization_13.q
+++ b/ql/src/test/queries/clientpositive/vectorization_13.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,
@@ -71,7 +71,7 @@ ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5,
 LIMIT 40;
 
 -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_14.q b/ql/src/test/queries/clientpositive/vectorization_14.q
index 825fd63..4796c18 100644
--- a/ql/src/test/queries/clientpositive/vectorization_14.q
+++ b/ql/src/test/queries/clientpositive/vectorization_14.q
@@ -1,11 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   ctimestamp1,
          cfloat,
          cstring1,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_15.q b/ql/src/test/queries/clientpositive/vectorization_15.q
index 5c48c58..21ba8c8 100644
--- a/ql/src/test/queries/clientpositive/vectorization_15.q
+++ b/ql/src/test/queries/clientpositive/vectorization_15.q
@@ -1,11 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cboolean1,
          cdouble,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_16.q b/ql/src/test/queries/clientpositive/vectorization_16.q
index 822c824..11b709f 100644
--- a/ql/src/test/queries/clientpositive/vectorization_16.q
+++ b/ql/src/test/queries/clientpositive/vectorization_16.q
@@ -1,11 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_17.q b/ql/src/test/queries/clientpositive/vectorization_17.q
index 57cdc41..1306f6b 100644
--- a/ql/src/test/queries/clientpositive/vectorization_17.q
+++ b/ql/src/test/queries/clientpositive/vectorization_17.q
@@ -1,11 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cstring1,
          cint,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_2.q b/ql/src/test/queries/clientpositive/vectorization_2.q
index 4941d1e..f232815 100644
--- a/ql/src/test/queries/clientpositive/vectorization_2.q
+++ b/ql/src/test/queries/clientpositive/vectorization_2.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_3.q b/ql/src/test/queries/clientpositive/vectorization_3.q
index 2e0350a..d6e6580 100644
--- a/ql/src/test/queries/clientpositive/vectorization_3.q
+++ b/ql/src/test/queries/clientpositive/vectorization_3.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_4.q b/ql/src/test/queries/clientpositive/vectorization_4.q
index ba603c8..3151cf0 100644
--- a/ql/src/test/queries/clientpositive/vectorization_4.q
+++ b/ql/src/test/queries/clientpositive/vectorization_4.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_5.q b/ql/src/test/queries/clientpositive/vectorization_5.q
index e2d4d0a..773f4b3 100644
--- a/ql/src/test/queries/clientpositive/vectorization_5.q
+++ b/ql/src/test/queries/clientpositive/vectorization_5.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_6.q b/ql/src/test/queries/clientpositive/vectorization_6.q
index f55a2fb..803f592 100644
--- a/ql/src/test/queries/clientpositive/vectorization_6.q
+++ b/ql/src/test/queries/clientpositive/vectorization_6.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_7.q b/ql/src/test/queries/clientpositive/vectorization_7.q
index bf3a1c2..131f570 100644
--- a/ql/src/test/queries/clientpositive/vectorization_7.q
+++ b/ql/src/test/queries/clientpositive/vectorization_7.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,
@@ -60,7 +60,7 @@ LIMIT 25;
 
 
 -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_8.q b/ql/src/test/queries/clientpositive/vectorization_8.q
index d43db26..2d357f1 100644
--- a/ql/src/test/queries/clientpositive/vectorization_8.q
+++ b/ql/src/test/queries/clientpositive/vectorization_8.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,
@@ -56,7 +56,7 @@ LIMIT 20;
 
 
 -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_9.q b/ql/src/test/queries/clientpositive/vectorization_9.q
index 822c824..11b709f 100644
--- a/ql/src/test/queries/clientpositive/vectorization_9.q
+++ b/ql/src/test/queries/clientpositive/vectorization_9.q
@@ -1,11 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_decimal_date.q b/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
index 29c025c..854ee20 100644
--- a/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
+++ b/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
@@ -1,7 +1,5 @@
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc;
 SET hive.vectorized.execution.enabled=true;
-EXPLAIN VECTORIZATION EXPRESSION  SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10;
+EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10;
 SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_div0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_div0.q b/ql/src/test/queries/clientpositive/vectorization_div0.q
index 025d457..05d81d0 100644
--- a/ql/src/test/queries/clientpositive/vectorization_div0.q
+++ b/ql/src/test/queries/clientpositive/vectorization_div0.q
@@ -1,17 +1,16 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled = true;
-set hive.fetch.task.conversion=none;
 
 -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants
-explain vectorization expression
+explain
 select cdouble / 0.0 from alltypesorc limit 100;
 select cdouble / 0.0 from alltypesorc limit 100;
 
 -- There are no zeros in the table, but there is 988888, so use it as zero
 
 -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators
-explain vectorization expression
+explain
 select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
 from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100;
 select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
@@ -19,7 +18,7 @@ from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit
 
 -- There are no zeros in the table, but there is -200.0, so use it as zero
 
-explain vectorization expression
+explain
 select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
 from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100;
 select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
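(Editorial note, not part of the patch: what the div0 tests pin down is that Hive yields NULL on division by zero rather than raising an error, and the vectorized division expressions must match that row by row. A sketch against alltypesorc, reusing the "988888 as zero" trick from the comments above:

select cdouble / 0.0 from alltypesorc limit 5;  -- every row is expected to be NULL
select cdouble / (cbigint - 988888L)
from alltypesorc
where cbigint = 988888;                         -- denominator 0, so the result is NULL)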

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_limit.q b/ql/src/test/queries/clientpositive/vectorization_limit.q
index a4c54f2..707f1ed 100644
--- a/ql/src/test/queries/clientpositive/vectorization_limit.q
+++ b/ql/src/test/queries/clientpositive/vectorization_limit.q
@@ -1,9 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
-explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
+explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
 SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
 
 set hive.optimize.reducededuplication.min.reducer=1;
@@ -11,31 +9,31 @@ set hive.limit.pushdown.memory.usage=0.3f;
 
 -- HIVE-3562 Some limit can be pushed down to map stage - c/p parts from limit_pushdown
 
-explain vectorization expression
+explain
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
 
 -- deduped RS
-explain vectorization expression
+explain
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
 
 -- distincts
-explain vectorization expression
+explain
 select distinct(ctinyint) from alltypesorc limit 20;
 select distinct(ctinyint) from alltypesorc limit 20;
 
-explain vectorization expression
+explain
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 
 -- limit zero
-explain vectorization expression
+explain
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
 
 -- 2MR (applied to last RS)
-explain vectorization expression
+explain
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
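(Editorial note, not part of the patch: the limit-pushdown cases above hinge on hive.limit.pushdown.memory.usage, which lets the map-side ReduceSink keep only a bounded top-N heap, here capped at 30% of available memory, instead of shipping every row to the reducer. A sketch:

set hive.limit.pushdown.memory.usage=0.3f;
explain
select ctinyint, cdouble
from alltypesorc
order by ctinyint, cdouble
limit 20;  -- the Reduce Output Operator typically gains a "TopN Hash Memory Usage: 0.3" entry)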
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_nested_udf.q b/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
index da8f99c..bb50f9b 100644
--- a/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
+++ b/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
@@ -1,5 +1,3 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 SELECT SUM(abs(ctinyint)) from alltypesorc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_not.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_not.q b/ql/src/test/queries/clientpositive/vectorization_not.q
index aa691ab..7ac507b 100644
--- a/ql/src/test/queries/clientpositive/vectorization_not.q
+++ b/ql/src/test/queries/clientpositive/vectorization_not.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_offset_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_offset_limit.q b/ql/src/test/queries/clientpositive/vectorization_offset_limit.q
index 97e1a05..3d01154 100644
--- a/ql/src/test/queries/clientpositive/vectorization_offset_limit.q
+++ b/ql/src/test/queries/clientpositive/vectorization_offset_limit.q
@@ -1,11 +1,10 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.mapred.mode=nonstrict;
-set hive.fetch.task.conversion=none;
 
-explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 3,2;
+explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 3,2;
 SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 3,2;
 
-explain vectorization expression
+explain
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 10,3;
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 10,3;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_part.q b/ql/src/test/queries/clientpositive/vectorization_part.q
index 9f4fc6e..8d677db 100644
--- a/ql/src/test/queries/clientpositive/vectorization_part.q
+++ b/ql/src/test/queries/clientpositive/vectorization_part.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC;
 insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100;
 insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_part_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_part_project.q b/ql/src/test/queries/clientpositive/vectorization_part_project.q
index d0dcb6f..3a48f20 100644
--- a/ql/src/test/queries/clientpositive/vectorization_part_project.q
+++ b/ql/src/test/queries/clientpositive/vectorization_part_project.q
@@ -1,11 +1,9 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC;
 insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc order by ctinyint, cint, cbigint limit 100;
 insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc order by ctinyint, cint, cbigint limit 100;
 
-explain vectorization select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10;
+explain select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10;
 select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_part_varchar.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_part_varchar.q b/ql/src/test/queries/clientpositive/vectorization_part_varchar.q
index 28646b9..d371de8 100644
--- a/ql/src/test/queries/clientpositive/vectorization_part_varchar.q
+++ b/ql/src/test/queries/clientpositive/vectorization_part_varchar.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE alltypesorc_part_varchar(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds varchar(4)) STORED AS ORC;
 insert overwrite table alltypesorc_part_varchar partition (ds='2011') select * from alltypesorc limit 100;
 insert overwrite table alltypesorc_part_varchar partition (ds='2012') select * from alltypesorc limit 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_pushdown.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_pushdown.q b/ql/src/test/queries/clientpositive/vectorization_pushdown.q
index 8acb193..b33cfa7 100644
--- a/ql/src/test/queries/clientpositive/vectorization_pushdown.q
+++ b/ql/src/test/queries/clientpositive/vectorization_pushdown.q
@@ -2,7 +2,5 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.optimize.index.filter=true;
-set hive.fetch.task.conversion=none;
-
-explain vectorization SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble;
+explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble;
 SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorization_short_regress.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_short_regress.q b/ql/src/test/queries/clientpositive/vectorization_short_regress.q
index 03e4bbc..114a3e2 100644
--- a/ql/src/test/queries/clientpositive/vectorization_short_regress.q
+++ b/ql/src/test/queries/clientpositive/vectorization_short_regress.q
@@ -2,7 +2,7 @@ set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 -- SORT_QUERY_RESULTS
 
@@ -36,8 +36,7 @@ set hive.fetch.task.conversion=none;
 -- ArithmeticOps: Add, Multiply, Subtract, Divide
 -- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(cint),
+EXPLAIN SELECT AVG(cint),
        (AVG(cint) + -3728),
        (-((AVG(cint) + -3728))),
        (-((-((AVG(cint) + -3728))))),
@@ -113,8 +112,7 @@ WHERE  ((762 = cbigint)
 -- ArithmeticOps: Divide, Multiply, Remainder, Subtract
 -- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT MAX(cint),
+EXPLAIN SELECT MAX(cint),
        (MAX(cint) / -3728),
        (MAX(cint) * -3728),
        VAR_POP(cbigint),
@@ -184,8 +182,7 @@ WHERE  (((cbigint <= 197)
 -- ArithmeticOps: Subtract, Remainder, Multiply, Add
 -- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT VAR_POP(cbigint),
+EXPLAIN SELECT VAR_POP(cbigint),
        (-(VAR_POP(cbigint))),
        (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))),
        COUNT(*),
@@ -253,8 +250,7 @@ WHERE  ((ctimestamp1 = ctimestamp2)
 -- ArithmeticOps: Add, Divide, Remainder, Multiply
 -- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(ctinyint),
+EXPLAIN SELECT AVG(ctinyint),
        (AVG(ctinyint) + 6981),
        ((AVG(ctinyint) + 6981) + AVG(ctinyint)),
        MAX(cbigint),
@@ -302,8 +298,7 @@ WHERE  (((ctimestamp2 <= ctimestamp1)
 -- ArithmeticOps: Multiply, Subtract, Add, Divide
 -- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cdouble,
        ctimestamp2,
        cstring1,
@@ -381,8 +376,7 @@ LIMIT 50;
 -- ArithmeticOps: Divide, Remainder, Subtract, Multiply
 -- FilterOps: Equal, LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cbigint,
        cstring1,
        cboolean1,
@@ -457,8 +451,7 @@ LIMIT 25;
 -- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder
 -- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cint,
+EXPLAIN SELECT   cint,
          cstring1,
          cboolean2,
          ctimestamp2,
@@ -531,8 +524,7 @@ LIMIT 75;
 -- ArithmeticOps: Divide, Subtract, Multiply, Remainder
 -- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring2,
          cdouble,
          cfloat,
@@ -591,8 +583,7 @@ LIMIT 45;
 -- ArithmeticOps: Remainder, Divide, Subtract
 -- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   csmallint,
+EXPLAIN SELECT   csmallint,
          (csmallint % -75) as c1,
          STDDEV_SAMP(csmallint) as c2,
          (-1.389 / csmallint) as c3,
@@ -637,8 +628,7 @@ LIMIT 20;
 -- ArithmeticOps: Multiply, Add, Subtract, Remainder
 -- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cdouble,
+EXPLAIN SELECT   cdouble,
          VAR_SAMP(cdouble),
          (2563.58 * VAR_SAMP(cdouble)),
          (-(VAR_SAMP(cdouble))),
@@ -696,8 +686,7 @@ ORDER BY cdouble;
 -- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder
 -- FilterOps: NotEqual, LessThan, Like, Equal, RLike
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring1,
          STDDEV_POP(cint) as c1,
          (STDDEV_POP(cint) * 10.175) as c2,
@@ -812,8 +801,7 @@ LIMIT 50;
 -- ArithmeticOps: Divide, Subtract, Remainder, Add, Multiply
 -- FilterOps: GreaterThan, LessThanOrEqual, Equal, LessThan, GreaterThanOrEqual, NotEqual, Like, RLike
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cboolean1,
+EXPLAIN SELECT   cboolean1,
          MAX(cfloat),
          (-(MAX(cfloat))),
          (-26.28 / MAX(cfloat)),
@@ -895,12 +883,12 @@ ORDER BY cboolean1;
 -- These tests verify COUNT on empty or null columns works correctly.
 create table test_count(i int) stored as orc;
 
-explain vectorization expression
+explain
 select count(*) from test_count;
 
 select count(*) from test_count;
 
-explain vectorization expression
+explain
 select count(i) from test_count;
 
 select count(i) from test_count;
@@ -923,32 +911,32 @@ insert into table alltypesnull select null, null, null, null, null, null, null,
 
 create table alltypesnullorc stored as orc as select * from alltypesnull;
 
-explain vectorization expression
+explain
 select count(*) from alltypesnullorc;
 
 select count(*) from alltypesnullorc;
 
-explain vectorization expression
+explain
 select count(ctinyint) from alltypesnullorc;
 
 select count(ctinyint) from alltypesnullorc;
 
-explain vectorization expression
+explain
 select count(cint) from alltypesnullorc;
 
 select count(cint) from alltypesnullorc;
 
-explain vectorization expression
+explain
 select count(cfloat) from alltypesnullorc;
 
 select count(cfloat) from alltypesnullorc;
 
-explain vectorization expression
+explain
 select count(cstring1) from alltypesnullorc;
 
 select count(cstring1) from alltypesnullorc;
 
-explain vectorization expression
+explain
 select count(cboolean1) from alltypesnullorc;
 
 select count(cboolean1) from alltypesnullorc;
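(Editorial note, not part of the patch: these queries pin down COUNT's NULL semantics, i.e. count(*) counts every row while count(col) skips NULLs, so against the all-NULL table above count(*) returns the row count and each per-column count returns 0. A sketch:

select count(*), count(ctinyint), count(cstring1)
from alltypesnullorc;  -- expect <row count>, 0, 0)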

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q b/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
index 191d8c6..022ce2e 100644
--- a/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
+++ b/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
@@ -1,6 +1,4 @@
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-
 create table vsmb_bucket_1(key int, value string) 
   CLUSTERED BY (key) 
   SORTED BY (key) INTO 1 BUCKETS 
@@ -30,11 +28,11 @@ set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
-explain vectorization expression
+explain
 select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;
 select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;
 
-explain vectorization expression
+explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key;
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key;
 
@@ -43,6 +41,6 @@ select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 
-explain vectorization expression
+explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key;
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key;
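
The settings in the hunks above are what turn these hinted joins into
sort-merge bucket map joins; condensed, the setup this file relies on (the
storage clause of the DDL is cut off by the hunk, so ORC is assumed below to
keep the sketch self-contained):

  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

  -- both sides bucketed and sorted on the join key, same bucket count:
  create table vsmb_bucket_1(key int, value string)
    CLUSTERED BY (key)
    SORTED BY (key) INTO 1 BUCKETS
    STORED AS ORC;  -- assumption: the storage format is elided in the hunk above

  select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;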

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_case.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_case.q b/ql/src/test/queries/clientpositive/vectorized_case.q
index 2efacb4..e74bf82 100644
--- a/ql/src/test/queries/clientpositive/vectorized_case.q
+++ b/ql/src/test/queries/clientpositive/vectorized_case.q
@@ -2,7 +2,7 @@ set hive.explain.user=false;
 set hive.fetch.task.conversion=none;
 set hive.vectorized.execution.enabled = true
 ;
-explain vectorization expression
+explain
 select 
   csmallint,
   case 
@@ -37,7 +37,7 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 ;
-explain vectorization expression
+explain
 select 
   csmallint,
   case 

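Both hunks in this file touch only the EXPLAIN line in front of the same CASE
query; the branch bodies are cut off by the hunk context, so the stand-ins
below are hypothetical, but the tested shape is a CASE over csmallint filtered
to three constants:

  explain
  select
    csmallint,
    case
      when csmallint = 418   then 'a'  -- hypothetical branches; the real
      when csmallint = 12205 then 'b'  -- ones are elided from the hunks above
      else 'c'
    end
  from alltypesorc  -- alltypesorc assumed, as in the sibling tests
  where csmallint = 418
  or csmallint = 12205
  or csmallint = 10583
  ;
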
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_casts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_casts.q b/ql/src/test/queries/clientpositive/vectorized_casts.q
index a32c150..0880e29 100644
--- a/ql/src/test/queries/clientpositive/vectorized_casts.q
+++ b/ql/src/test/queries/clientpositive/vectorized_casts.q
@@ -8,7 +8,7 @@ SET hive.vectorized.execution.enabled = true;
 -- Currently, vectorization is not supported in fetch task (hive.fetch.task.conversion=none)
 -- Test type casting in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
 -- to boolean
    cast (ctinyint as boolean)

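Per the comment retained in this hunk, vectorization is not applied to fetch
tasks, which is why these files pin hive.fetch.task.conversion=none. A minimal
sketch of the guard plus the first cast from the query (alltypesorc assumed as
the source table, as in the sibling tests):

  set hive.fetch.task.conversion=none;  -- force a real map task so the plan can vectorize
  SET hive.vectorized.execution.enabled = true;

  explain
  select cast (ctinyint as boolean) from alltypesorc;
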
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_context.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_context.q b/ql/src/test/queries/clientpositive/vectorized_context.q
index 0558bc0..657270e 100644
--- a/ql/src/test/queries/clientpositive/vectorized_context.q
+++ b/ql/src/test/queries/clientpositive/vectorized_context.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-
 create table store(s_store_sk int, s_city string)
 stored as orc;
 insert overwrite table store
@@ -28,7 +26,7 @@ set hive.vectorized.execution.enabled=true;
 
 set hive.mapjoin.hybridgrace.hashtable=false;
 
-explain vectorization 
+explain 
 select store.s_city, ss_net_profit
 from store_sales
 JOIN store ON store_sales.ss_store_sk = store.s_store_sk

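This context test pairs vectorization with a plain (hybrid grace disabled) map
join hash table; the essential knobs and the join, condensed from the hunks
above (any trailing clauses of the query are cut off by the hunk):

  set hive.vectorized.execution.enabled=true;
  set hive.mapjoin.hybridgrace.hashtable=false;

  explain
  select store.s_city, ss_net_profit
  from store_sales
  JOIN store ON store_sales.ss_store_sk = store.s_store_sk;
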
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
index b9a7ecf..7d7b1cf 100644
--- a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
+++ b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled = true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -25,7 +24,7 @@ INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date,
 
 SELECT * FROM date_udf_flight_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(fl_time),
   year(fl_time),
   month(fl_time),
@@ -55,7 +54,7 @@ SELECT
   datediff(fl_time, "2000-01-01")
 FROM date_udf_flight_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(fl_date),
   year(fl_date),
   month(fl_date),
@@ -85,7 +84,7 @@ SELECT
   datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   year(fl_time) = year(fl_date),
   month(fl_time) = month(fl_date),
   day(fl_time) = day(fl_date),
@@ -114,7 +113,7 @@ SELECT
   datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT 
+EXPLAIN SELECT 
   fl_date, 
   to_date(date_add(fl_date, 2)), 
   to_date(date_sub(fl_date, 2)),
@@ -135,7 +134,7 @@ FROM date_udf_flight_orc LIMIT 10;
 -- Test extracting the date part of an expression that includes time
 SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   min(fl_date) AS c1,
   max(fl_date),
   count(fl_date),

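The EXPLAIN blocks in this file run the same battery of date UDFs first over
the timestamp column (fl_time), then over the date column (fl_date), and
finally compare the two column-by-column (year(fl_time) = year(fl_date), and
so on). A compact sketch of the pattern:

  EXPLAIN SELECT
    to_unix_timestamp(fl_date),
    year(fl_date),
    month(fl_date),
    to_date(date_add(fl_date, 2)),
    to_date(date_sub(fl_date, 2)),
    datediff(fl_date, "2000-01-01")
  FROM date_udf_flight_orc;

  -- extracting the date part of an expression that includes time:
  SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
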
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q b/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
index 4339a5f..6900dc0 100644
--- a/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
+++ b/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
@@ -1,15 +1,14 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 SET hive.map.groupby.sorted=true;
 
 create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc;
 insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src order by a limit 1) y lateral view  explode(a) t1 as c;
 
-explain vectorization select sum(distinct a), count(distinct a) from dtest;
+explain select sum(distinct a), count(distinct a) from dtest;
 select sum(distinct a), count(distinct a) from dtest;
 
-explain vectorization select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc;
+explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc;
 select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc;
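
The dtest fixture above is built so the distinct aggregates have exactly one
distinct value: the lateral view explodes array(300,300,300,300,300) into five
rows with a = 300, so sum(distinct a) is 300 and count(distinct a) is 1.
Condensed:

  create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc;
  insert into table dtest
    select c, b
    from (select array(300,300,300,300,300) as a, 1 as b from src order by a limit 1) y
    lateral view explode(a) t1 as c;

  -- five rows, one distinct value: expect 300 and 1
  select sum(distinct a), count(distinct a) from dtest;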


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
index f0d2a50..8cf503f 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,
@@ -35,7 +35,7 @@ LIMIT 40
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,
@@ -68,10 +68,6 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1
 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16
 LIMIT 40
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -90,34 +86,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2028982 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4, val 3569.0) -> boolean, FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5) -> boolean, FilterLongColNotEqualLongScalar(col 10, val 1) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 12, val 11.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterDoubleColNotEqualDoubleScalar(col 12, val 12.0)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean, FilterDecimalColLessDecimalScalar(col 13, val 9763215.5639)(children: CastLongToDecimal(col 0) -> 13:decimal(11,4)) -> boolean) -> boolean) -> boolean
                     predicate: (((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
                       outputColumnNames: cboolean1, ctinyint, ctimestamp1, cfloat, cstring1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [10, 0, 8, 4, 6]
                       Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFMaxLong(col 0) -> tinyint, VectorUDAFSumDouble(col 4) -> double, VectorUDAFStdPopDouble(col 4) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopLong(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMaxDouble(col 4) -> float, VectorUDAFMinLong(col 0) -> tinyint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            keyExpressions: col 10, col 0, col 8, col 4, col 6
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFStdPopDouble(col 4) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         keys: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
@@ -130,21 +107,8 @@ STAGE PLANS:
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_pop parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col2] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
@@ -163,33 +127,16 @@ STAGE PLANS:
                     TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: tinyint), KEY.reducesinkkey6 (type: tinyint), KEY.reducesinkkey7 (type: tinyint), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey14 (type: double), KEY.reducesinkkey15 (type: decimal(7,3)), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: float), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: tinyint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 10, 14, 15, 16, 17, 18, 19, 20]
                 Statistics: Num rows: 1365 Data size: 446640 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 40
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 40 Data size: 13206 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 40 Data size: 13206 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -311,7 +258,7 @@ NULL	-64	1969-12-31 16:00:11.912	-64.0	NULL	64	-64	0	-64.0	-0.0	64.0	-5091.392	0
 NULL	-64	1969-12-31 16:00:12.339	-64.0	NULL	64	-64	0	-64.0	-0.0	64.0	-5091.392	0.0	64.0	0.0	-10.175	-64.0	0.410625	-64.0	0.0	-64
 NULL	-64	1969-12-31 16:00:13.274	-64.0	NULL	64	-64	0	-64.0	-0.0	64.0	-5091.392	0.0	64.0	0.0	-10.175	-64.0	0.410625	-64.0	0.0	-64
 PREHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,
@@ -345,7 +292,7 @@ ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5,
 LIMIT 40
 PREHOOK: type: QUERY
 POSTHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT   cboolean1,
          ctinyint,
          ctimestamp1,
@@ -378,10 +325,6 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1
 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16
 LIMIT 40
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -400,34 +343,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2028982 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4, val 3569.0) -> boolean, FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5) -> boolean, FilterLongColNotEqualLongScalar(col 10, val 1) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 12, val -1.388)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterDoubleColNotEqualDoubleScalar(col 12, val -1.3359999999999999)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean, FilterDecimalColLessDecimalScalar(col 13, val 9763215.5639)(children: CastLongToDecimal(col 0) -> 13:decimal(11,4)) -> boolean) -> boolean) -> boolean
                     predicate: (((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
                       outputColumnNames: cboolean1, ctinyint, ctimestamp1, cfloat, cstring1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [10, 0, 8, 4, 6]
                       Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFMaxLong(col 0) -> tinyint, VectorUDAFSumDouble(col 4) -> double, VectorUDAFStdPopDouble(col 4) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopLong(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMaxDouble(col 4) -> float, VectorUDAFMinLong(col 0) -> tinyint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            keyExpressions: col 10, col 0, col 8, col 4, col 6
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFStdPopDouble(col 4) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         keys: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
@@ -440,21 +364,8 @@ STAGE PLANS:
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_pop parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col2] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
@@ -473,33 +384,16 @@ STAGE PLANS:
                     TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: tinyint), KEY.reducesinkkey6 (type: tinyint), KEY.reducesinkkey7 (type: tinyint), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey14 (type: double), KEY.reducesinkkey15 (type: decimal(7,3)), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: float), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: tinyint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 10, 14, 15, 16, 17, 18, 19, 20]
                 Statistics: Num rows: 1365 Data size: 446640 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 40
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 40 Data size: 13206 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 40 Data size: 13206 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

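The Reduce Vectorization block deleted above documents the one row-mode stage
in this plan: stddev_pop keeps its partial aggregate as
struct<count:bigint,sum:double,variance:double>, which the vectorized map-side
group-by can only emit in row mode (vectorOutput: false) and which the merging
reducer cannot consume at all, so Reducer 2 runs row-mode while the map task
and the final ORDER BY reducer stay vectorized. A minimal query that should
show the same split, per those annotations:

  SET hive.vectorized.execution.enabled = true;

  -- the stddev_pop partial state is a STRUCT, not a primitive, so the reducer
  -- merging it is expected to fall back to row-mode execution:
  EXPLAIN
  SELECT cboolean1, stddev_pop(cfloat), stddev_pop(ctinyint)
  FROM alltypesorc
  GROUP BY cboolean1
  ORDER BY cboolean1;
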
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
index e6fdca9..c227e44 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   ctimestamp1,
          cfloat,
          cstring1,
@@ -35,7 +35,7 @@ ORDER BY cstring1, cfloat, cdouble, ctimestamp1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   ctimestamp1,
          cfloat,
          cstring1,
@@ -68,10 +68,6 @@ WHERE    (((ctinyint <= cbigint)
 GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble
 ORDER BY cstring1, cfloat, cdouble, ctimestamp1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -111,21 +107,8 @@ STAGE PLANS:
                           value expressions: _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: float), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_samp(VALUE._col0), max(VALUE._col1), stddev_pop(VALUE._col2), count(VALUE._col3), var_pop(VALUE._col4), var_samp(VALUE._col5)
@@ -144,13 +127,6 @@ STAGE PLANS:
                     value expressions: _col3 (type: boolean), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: float), _col10 (type: float), _col11 (type: float), _col12 (type: double), _col13 (type: double), _col14 (type: bigint), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: double), _col21 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey3 (type: timestamp), KEY.reducesinkkey1 (type: float), KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), KEY.reducesinkkey2 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: float), VALUE._col6 (type: float), VALUE._col7 (type: float), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: bigint), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col14 (type: double), VALUE._col15 (type: double), VALUE._col16 (type: double), VALUE._col17 (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
index a9908a4..768aed4 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cboolean1,
          cdouble,
@@ -33,7 +33,7 @@ ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cboolean1,
          cdouble,
@@ -64,10 +64,6 @@ WHERE    (((cstring2 LIKE '%ss%')
 GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
 ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -107,21 +103,8 @@ STAGE PLANS:
                           value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
@@ -140,13 +123,6 @@ STAGE PLANS:
                     value expressions: _col7 (type: double), _col8 (type: decimal(13,2)), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: tinyint), _col16 (type: double), _col17 (type: float), _col18 (type: int), _col19 (type: decimal(13,2)), _col20 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: boolean), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: tinyint), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: timestamp), VALUE._col0 (type: double), VALUE._col1 (type: decimal(13,2)), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: tinyint), VALUE._col9 (type: double), VALUE._col10 (type: float), VALUE._col11 (type: int), VALUE._col12 (type: decimal(13,2)), VALUE._col13 (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
index 22041cc..a1eb629 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,
@@ -22,7 +22,7 @@ GROUP BY cstring1, cdouble, ctimestamp1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,
@@ -42,10 +42,6 @@ WHERE    ((cstring2 LIKE '%b%')
               OR (cstring1 < 'a')))
 GROUP BY cstring1, cdouble, ctimestamp1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -84,21 +80,8 @@ STAGE PLANS:
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col1] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
index 007ce8f..3a77cc9 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cstring1,
          cint,
@@ -26,7 +26,7 @@ ORDER BY cbigint, cfloat
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cfloat,
          cstring1,
          cint,
@@ -50,10 +50,6 @@ WHERE    (((cbigint > -23)
                   OR (cfloat = cdouble))))
 ORDER BY cbigint, cfloat
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -85,23 +81,8 @@ STAGE PLANS:
                         value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: timestamp), _col4 (type: double), _col6 (type: double), _col7 (type: bigint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: decimal(11,4)), _col13 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: float), VALUE._col0 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: timestamp), VALUE._col3 (type: double), KEY.reducesinkkey0 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: bigint), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: decimal(11,4)), VALUE._col11 (type: double)

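By contrast with the stddev-heavy plans above, this plan has no struct-valued
partial aggregates, and the deleted annotations showed every stage vectorized
(groupByVectorOutput: true on the map side, Reducer 2 vectorized as well). A
filter-plus-ORDER BY query of roughly this shape is the fully vectorizable
case (condensed from the fragments visible above; the real predicate is
larger):

  EXPLAIN
  SELECT cfloat, cstring1, cint, ctimestamp1, cdouble, cbigint
  FROM alltypesorc
  WHERE (cbigint > -23) OR (cfloat = cdouble)
  ORDER BY cbigint, cfloat;
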
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
index aeec133..54cc498 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,
@@ -29,7 +29,7 @@ LIMIT 25
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,
@@ -56,10 +56,6 @@ WHERE  ((ctinyint != 0)
 ORDER BY cboolean1, cbigint, csmallint, ctinyint, ctimestamp1, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9
 LIMIT 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -77,74 +73,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3019778 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0, val 0) -> boolean, FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 12, val 0.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterLongColEqualLongColumn(col 0, col 2)(children: col 0) -> boolean, FilterStringColLikeStringScalar(col 7, pattern ss) -> boolean) -> boolean, FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 12, val -15.0)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean, FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5) -> boolean) -> boolean) -> boolean) -> boolean
                     predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1789382 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [10, 3, 1, 0, 8, 6, 13, 14, 15, 16, 18, 19, 17, 20, 22]
-                          selectExpressions: LongColAddLongColumn(col 3, col 3) -> 13:long, LongColModuloLongScalar(col 1, val -257)(children: col 1) -> 14:long, LongColUnaryMinus(col 1) -> 15:long, LongColUnaryMinus(col 0) -> 16:long, LongColAddLongScalar(col 17, val 17)(children: col 17) -> 18:long, LongColMultiplyLongColumn(col 3, col 17)(children: col 17) -> 19:long, LongColModuloLongColumn(col 2, col 1)(children: col 1) -> 17:long, LongColUnaryMinus(col 0) -> 20:long, LongColModuloLongColumn(col 21, col 0)(children: LongColUnaryMinus(col 0) -> 21:long) -> 22:long
                       Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: smallint), KEY.reducesinkkey3 (type: tinyint), KEY.reducesinkkey4 (type: timestamp), KEY.reducesinkkey5 (type: string), KEY.reducesinkkey6 (type: bigint), KEY.reducesinkkey7 (type: int), KEY.reducesinkkey8 (type: smallint), KEY.reducesinkkey9 (type: tinyint), KEY.reducesinkkey10 (type: int), KEY.reducesinkkey11 (type: bigint), KEY.reducesinkkey12 (type: int), KEY.reducesinkkey9 (type: tinyint), KEY.reducesinkkey14 (type: tinyint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 9, 14]
                 Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 4380 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 4380 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -239,7 +193,7 @@ NULL	NULL	-7196	1	1969-12-31 15:59:48.361	NULL	NULL	0	7196	-1	16	NULL	NULL	-1	0
 NULL	NULL	-7196	14	1969-12-31 15:59:50.291	NULL	NULL	0	7196	-14	3	NULL	NULL	-14	0
 NULL	NULL	-7196	22	1969-12-31 15:59:52.699	NULL	NULL	0	7196	-22	-5	NULL	NULL	-22	0
 PREHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,
@@ -267,7 +221,7 @@ ORDER BY cboolean1, cbigint, csmallint, ctinyint, ctimestamp1, cstring1, c1, c2,
 LIMIT 25
 PREHOOK: type: QUERY
 POSTHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1,
        cbigint,
        csmallint,
@@ -294,10 +248,6 @@ WHERE  ((ctinyint != 0)
 ORDER BY cboolean1, cbigint, csmallint, ctinyint, ctimestamp1, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9
 LIMIT 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -315,74 +265,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3019778 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0, val 0) -> boolean, FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 12, val 0.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterLongColEqualLongColumn(col 0, col 2)(children: col 0) -> boolean, FilterStringColLikeStringScalar(col 7, pattern ss) -> boolean) -> boolean, FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 12, val 7.6850000000000005)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean, FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5) -> boolean) -> boolean) -> boolean) -> boolean
                     predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1789382 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [10, 3, 1, 0, 8, 6, 13, 14, 15, 16, 18, 19, 17, 20, 22]
-                          selectExpressions: LongColAddLongColumn(col 3, col 3) -> 13:long, LongColModuloLongScalar(col 1, val -257)(children: col 1) -> 14:long, LongColUnaryMinus(col 1) -> 15:long, LongColUnaryMinus(col 0) -> 16:long, LongColAddLongScalar(col 17, val 17)(children: col 17) -> 18:long, LongColMultiplyLongColumn(col 3, col 17)(children: col 17) -> 19:long, LongColModuloLongColumn(col 2, col 1)(children: col 1) -> 17:long, LongColUnaryMinus(col 0) -> 20:long, LongColModuloLongColumn(col 21, col 0)(children: LongColUnaryMinus(col 0) -> 21:long) -> 22:long
                       Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: smallint), KEY.reducesinkkey3 (type: tinyint), KEY.reducesinkkey4 (type: timestamp), KEY.reducesinkkey5 (type: string), KEY.reducesinkkey6 (type: bigint), KEY.reducesinkkey7 (type: int), KEY.reducesinkkey8 (type: smallint), KEY.reducesinkkey9 (type: tinyint), KEY.reducesinkkey10 (type: int), KEY.reducesinkkey11 (type: bigint), KEY.reducesinkkey12 (type: int), KEY.reducesinkkey9 (type: tinyint), KEY.reducesinkkey14 (type: tinyint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 9, 14]
                 Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 4380 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 4380 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
index d0410bd..70ef835 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,
@@ -27,7 +27,7 @@ LIMIT 20
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,
@@ -52,10 +52,6 @@ WHERE  (((cstring2 IS NOT NULL)
 ORDER BY ctimestamp1, cdouble, cboolean1, cstring1, cfloat, c1, c2, c3, c4, c5, c6, c7, c8, c9
 LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -73,74 +69,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2983078 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7) -> boolean, FilterDoubleColLessEqualDoubleScalar(col 12, val 10.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterDoubleColNotEqualDoubleScalar(col 12, val 16.0)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean) -> boolean, FilterDoubleColLessDoubleScalar(col 4, val -6432.0) -> boolean, FilterExprAndExpr(children: SelectColumnIsNotNull(col 10) -> boolean, FilterDoubleColEqualDoubleScalar(col 5, val 988888.0) -> boolean) -> boolean) -> boolean
                     predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0)) or (cfloat < -6432) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                     Statistics: Num rows: 3060 Data size: 743036 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 5, 10, 6, 4, 12, 13, 14, 16, 18, 15, 17, 19, 21]
-                          selectExpressions: DoubleColUnaryMinus(col 5) -> 12:double, DoubleScalarSubtractDoubleColumn(val -5638.15, col 5) -> 13:double, DoubleColMultiplyDoubleScalar(col 5, val -257.0) -> 14:double, DoubleColAddDoubleColumn(col 15, col 4)(children: CastLongToFloatViaLongToDouble(col 2) -> 15:double) -> 16:double, DoubleColAddDoubleColumn(col 15, col 17)(children: DoubleColUnaryMinus(col 5) -> 15:double, CastLongToDouble(col 3) -> 17:double) -> 18:double, DoubleColUnaryMinus(col 5) -> 15:double, DoubleScalarSubtractDoubleColumn(val -1.3890000581741333, col 4) -> 17:double, DoubleColUnaryMinus(col 4) -> 19:double, DoubleColAddDoubleColumn(col 20, col 22)(children: DoubleScalarSubtractDoubleColumn(val -5638.15, col 5) -> 20:double, col 22) -> 21:double
                       Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: boolean), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: float), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double), KEY.reducesinkkey8 (type: float), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: float), KEY.reducesinkkey13 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 5, 11, 12, 13]
                 Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -226,7 +180,7 @@ POSTHOOK: Input: default@alltypesorc
 1969-12-31 15:59:43.807	-7196.0	NULL	NULL	42.0	7196.0	1557.8500000000004	1849372.0	NULL	-5.98226333E8	7196.0	-43.389	-42.0	NULL
 1969-12-31 15:59:43.82	-7196.0	NULL	NULL	-30.0	7196.0	1557.8500000000004	1849372.0	NULL	1.329550715E9	7196.0	28.611	30.0	NULL
 PREHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,
@@ -252,7 +206,7 @@ ORDER BY ctimestamp1, cdouble, cboolean1, cstring1, cfloat, c1, c2, c3, c4, c5,
 LIMIT 20
 PREHOOK: type: QUERY
 POSTHOOK: query: -- double compare timestamp
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ctimestamp1,
        cdouble,
        cboolean1,
@@ -277,10 +231,6 @@ WHERE  (((cstring2 IS NOT NULL)
 ORDER BY ctimestamp1, cdouble, cboolean1, cstring1, cfloat, c1, c2, c3, c4, c5, c6, c7, c8, c9
 LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -298,74 +248,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2983078 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7) -> boolean, FilterDoubleColLessEqualDoubleScalar(col 12, val 12.503)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterDoubleColNotEqualDoubleScalar(col 12, val 11.998)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean) -> boolean, FilterDoubleColLessDoubleScalar(col 4, val -6432.0) -> boolean, FilterExprAndExpr(children: SelectColumnIsNotNull(col 10) -> boolean, FilterDoubleColEqualDoubleScalar(col 5, val 988888.0) -> boolean) -> boolean) -> boolean
                     predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998)) or (cfloat < -6432) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                     Statistics: Num rows: 3060 Data size: 743036 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 5, 10, 6, 4, 12, 13, 14, 16, 18, 15, 17, 19, 21]
-                          selectExpressions: DoubleColUnaryMinus(col 5) -> 12:double, DoubleScalarSubtractDoubleColumn(val -5638.15, col 5) -> 13:double, DoubleColMultiplyDoubleScalar(col 5, val -257.0) -> 14:double, DoubleColAddDoubleColumn(col 15, col 4)(children: CastLongToFloatViaLongToDouble(col 2) -> 15:double) -> 16:double, DoubleColAddDoubleColumn(col 15, col 17)(children: DoubleColUnaryMinus(col 5) -> 15:double, CastLongToDouble(col 3) -> 17:double) -> 18:double, DoubleColUnaryMinus(col 5) -> 15:double, DoubleScalarSubtractDoubleColumn(val -1.3890000581741333, col 4) -> 17:double, DoubleColUnaryMinus(col 4) -> 19:double, DoubleColAddDoubleColumn(col 20, col 22)(children: DoubleScalarSubtractDoubleColumn(val -5638.15, col 5) -> 20:double, col 22) -> 21:double
                       Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: boolean), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: float), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double), KEY.reducesinkkey8 (type: float), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: float), KEY.reducesinkkey13 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 5, 11, 12, 13]
                 Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
index 22041cc..a1eb629 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,
@@ -22,7 +22,7 @@ GROUP BY cstring1, cdouble, ctimestamp1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION 
+EXPLAIN 
 SELECT   cstring1,
          cdouble,
          ctimestamp1,
@@ -42,10 +42,6 @@ WHERE    ((cstring2 LIKE '%b%')
               OR (cstring1 < 'a')))
 GROUP BY cstring1, cdouble, ctimestamp1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -84,21 +80,8 @@ STAGE PLANS:
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col1] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
index 74455f5..9a6cb52 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
@@ -12,78 +12,28 @@ POSTHOOK: Lineage: date_decimal_test.cdate EXPRESSION [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: date_decimal_test.cdecimal EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: date_decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: date_decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+POSTHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: date_decimal_test
-                  Statistics: Num rows: 12288 Data size: 1651260 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0) -> boolean, SelectColumnIsNotNull(col 1) -> boolean) -> boolean
-                    predicate: (cint is not null and cdouble is not null) (type: boolean)
-                    Statistics: Num rows: 12288 Data size: 1651260 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: cdate (type: date), cdecimal (type: decimal(20,10))
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 3]
-                      Statistics: Num rows: 12288 Data size: 1651260 Basic stats: COMPLETE Column stats: NONE
-                      Limit
-                        Number of rows: 10
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                        Statistics: Num rows: 10 Data size: 1340 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-                          Statistics: Num rows: 10 Data size: 1340 Basic stats: COMPLETE Column stats: NONE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: date_decimal_test
+          Filter Operator
+            predicate: (cint is not null and cdouble is not null) (type: boolean)
+            Select Operator
+              expressions: cdate (type: date), cdecimal (type: decimal(20,10))
+              outputColumnNames: _col0, _col1
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
index 872e7f3..2078e81 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
@@ -46,14 +46,10 @@ POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cstring2 SIMPLE [(alltype
 POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10
+PREHOOK: query: explain select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10
+POSTHOOK: query: explain select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -82,23 +78,8 @@ STAGE PLANS:
                       TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
index 6324e01..6bd4bd6 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
@@ -1,12 +1,8 @@
 WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: explain vectorization SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble
+PREHOOK: query: explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble
+POSTHOOK: query: explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -43,21 +39,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: struct<count:bigint,sum:double,input:bigint>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:bigint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0)


[59/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b597ab2a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b597ab2a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b597ab2a

Branch: refs/heads/hive-14535
Commit: b597ab2a07034b9c82e4bb0591123c3a115f27eb
Parents: 4b7f373
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Sep 28 20:23:33 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue Oct 18 12:27:41 2016 +0100

----------------------------------------------------------------------
 .../druid/HiveDruidQueryBasedInputFormat.java   |    6 +-
 .../serde/DruidGroupByQueryRecordReader.java    |    2 +-
 .../serde/DruidSelectQueryRecordReader.java     |    2 +-
 .../hadoop/hive/druid/serde/DruidSerDe.java     |    2 +-
 .../serde/DruidTimeseriesQueryRecordReader.java |    2 +-
 .../druid/serde/DruidTopNQueryRecordReader.java |    2 +-
 pom.xml                                         |    3 +-
 ql/pom.xml                                      |    7 +-
 .../calcite/HiveDefaultRelMetadataProvider.java |    2 +-
 .../optimizer/calcite/HivePlannerContext.java   |    9 +-
 .../ql/optimizer/calcite/HiveRelBuilder.java    |   18 +-
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |    8 +-
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  |  821 --------------
 .../optimizer/calcite/HiveTypeSystemImpl.java   |   39 +-
 .../calcite/cost/HiveDefaultCostModel.java      |    7 +-
 .../optimizer/calcite/cost/HiveRelMdCost.java   |   10 +-
 .../calcite/druid/DruidIntervalUtils.java       |  466 --------
 .../ql/optimizer/calcite/druid/DruidQuery.java  | 1053 ------------------
 .../optimizer/calcite/druid/DruidQueryType.java |   42 -
 .../ql/optimizer/calcite/druid/DruidRules.java  |  591 ----------
 .../ql/optimizer/calcite/druid/DruidSchema.java |   51 -
 .../ql/optimizer/calcite/druid/DruidTable.java  |  121 --
 .../optimizer/calcite/druid/HiveDruidConf.java  |   33 -
 .../calcite/reloperators/HiveAggregate.java     |    3 +-
 .../reloperators/HiveDateGranularity.java       |   54 -
 .../calcite/reloperators/HiveExtractDate.java   |   50 +
 .../calcite/reloperators/HiveFloorDate.java     |   64 ++
 .../rules/HiveAggregateJoinTransposeRule.java   |    9 +-
 .../rules/HiveAggregateProjectMergeRule.java    |    3 +-
 .../rules/HiveFilterProjectTSTransposeRule.java |   16 +-
 .../rules/HiveFilterProjectTransposeRule.java   |   21 +-
 .../calcite/rules/HivePreFilteringRule.java     |    7 +-
 .../rules/HiveReduceExpressionsRule.java        |  914 ++-------------
 .../HiveReduceExpressionsWithStatsRule.java     |    5 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |  243 +---
 .../calcite/stats/HiveRelMdCollation.java       |   10 +-
 .../calcite/stats/HiveRelMdDistribution.java    |   10 +-
 .../calcite/stats/HiveRelMdPredicates.java      |   31 +-
 .../calcite/stats/HiveRelMdSelectivity.java     |   28 +-
 .../optimizer/calcite/stats/HiveRelMdSize.java  |   13 +-
 .../calcite/stats/HiveRelMdUniqueKeys.java      |   72 +-
 .../calcite/translator/ASTBuilder.java          |   49 +-
 .../calcite/translator/ASTConverter.java        |   51 +-
 .../calcite/translator/ExprNodeConverter.java   |   49 +-
 .../translator/PlanModifierForASTConv.java      |    5 +
 .../calcite/translator/RexNodeConverter.java    |   61 +-
 .../translator/SqlFunctionConverter.java        |   37 +-
 .../calcite/translator/TypeConverter.java       |   41 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   40 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |    2 +-
 .../optimizer/calcite/TestCBOMaxNumToCNF.java   |    5 +-
 .../calcite/TestCBORuleFiredOnlyOnce.java       |    2 +-
 .../results/clientpositive/druid_basic2.q.out   |   48 +-
 .../clientpositive/druid_intervals.q.out        |   40 +-
 .../clientpositive/druid_timeseries.q.out       |   52 +-
 .../results/clientpositive/druid_topn.q.out     |   32 +-
 .../clientpositive/explain_logical.q.out        |   48 +-
 .../clientpositive/groupby_sort_1_23.q.out      |   40 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |   40 +-
 .../results/clientpositive/limit_pushdown.q.out |   12 +-
 .../clientpositive/limit_pushdown3.q.out        |   12 +-
 .../clientpositive/llap/explainuser_4.q.out     |   32 +-
 .../clientpositive/llap/limit_pushdown.q.out    |    9 +-
 .../results/clientpositive/llap/lineage3.q.out  |    2 +-
 .../llap/table_access_keys_stats.q.out          |    6 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |   42 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |   42 +-
 .../offset_limit_ppd_optimizer.q.out            |   12 +-
 .../results/clientpositive/perf/query75.q.out   |   12 +-
 .../spark/groupby_sort_1_23.q.out               |   32 +-
 .../spark/groupby_sort_skew_1_23.q.out          |   32 +-
 .../clientpositive/spark/limit_pushdown.q.out   |    9 +-
 .../spark/table_access_keys_stats.q.out         |    6 +-
 .../clientpositive/tez/explainanalyze_4.q.out   |   32 +-
 .../tez/vectorization_limit.q.out               |    9 +-
 .../clientpositive/vectorization_limit.q.out    |   12 +-
 76 files changed, 1136 insertions(+), 4669 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/HiveDruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/HiveDruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/HiveDruidQueryBasedInputFormat.java
index 3df1452..a18e590 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/HiveDruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/HiveDruidQueryBasedInputFormat.java
@@ -25,6 +25,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.adapter.druid.DruidDateTimeUtils;
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -37,8 +39,6 @@ import org.apache.hadoop.hive.druid.serde.DruidSelectQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidTimeseriesQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidTopNQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidWritable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidIntervalUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.JobConf;
@@ -273,7 +273,7 @@ public class HiveDruidQueryBasedInputFormat extends InputFormat<NullWritable, Dr
   }
 
   private static List<List<Interval>> createSplitsIntervals(List<Interval> intervals, int numSplits) {
-    final long totalTime = DruidIntervalUtils.extractTotalTime(intervals);
+    final long totalTime = DruidDateTimeUtils.extractTotalTime(intervals);
     long startTime = intervals.get(0).getStartMillis();
     long endTime = startTime;
     long currTime = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
index 226060f..49e096b 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
@@ -21,9 +21,9 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputSplit;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
index 70b493c..fccf7c4 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
@@ -22,8 +22,8 @@ import java.io.InputStream;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.io.NullWritable;
 
 import com.fasterxml.jackson.core.type.TypeReference;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index 8f53d4a..238f7a3 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -25,11 +25,11 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Properties;
 
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeSpec;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
index 812ae03..b91178c 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.io.NullWritable;
 
 import com.fasterxml.jackson.core.type.TypeReference;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
index 0b87976..0b77a9b 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
@@ -22,8 +22,8 @@ import java.io.InputStream;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
 import org.apache.hadoop.io.NullWritable;
 
 import com.fasterxml.jackson.core.type.TypeReference;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5d13344..98d2dc2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -112,9 +112,10 @@
     <antlr.version>3.4</antlr.version>
     <apache-directory-server.version>1.5.6</apache-directory-server.version>
     <apache-directory-clientapi.version>0.1</apache-directory-clientapi.version>
+    <avatica.version>1.8.0</avatica.version>
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.6.0</calcite.version>
+    <calcite.version>1.10.0</calcite.version>
     <datanucleus-api-jdo.version>4.2.1</datanucleus-api-jdo.version>
     <datanucleus-core.version>4.1.6</datanucleus-core.version>
     <datanucleus-rdbms.version>4.1.7</datanucleus-rdbms.version>

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 2a93bb7..489c6f3 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -383,8 +383,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.calcite</groupId>
-      <artifactId>calcite-avatica</artifactId>
+      <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.calcite.avatica</groupId>
+      <artifactId>avatica</artifactId>
+      <version>${avatica.version}</version>
       <exclusions>
         <!-- hsqldb interferes with the use of derby as the default db
           in hive's use of datanucleus.

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
index c0609d7..75fb916 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java
@@ -77,7 +77,7 @@ public class HiveDefaultRelMetadataProvider {
                     HiveRelMdDistribution.SOURCE,
                     HiveRelMdCollation.SOURCE,
                     HiveRelMdPredicates.SOURCE,
-                    new DefaultRelMetadataProvider()));
+                    DefaultRelMetadataProvider.INSTANCE));
   }
 
 }
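
Note: Calcite 1.10 exposes DefaultRelMetadataProvider as a singleton, which is why the constructor call above becomes DefaultRelMetadataProvider.INSTANCE. A minimal sketch of the chained-provider idiom this hunk updates, with the full list of Hive SOURCE providers abbreviated:

    import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
    import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
    import org.apache.calcite.rel.metadata.RelMetadataProvider;
    import com.google.common.collect.ImmutableList;

    // Hive-specific metadata handlers are consulted first; the Calcite
    // defaults act as a fallback for anything Hive does not override.
    RelMetadataProvider provider = ChainedRelMetadataProvider.of(
        ImmutableList.<RelMetadataProvider>of(
            HiveRelMdPredicates.SOURCE,
            DefaultRelMetadataProvider.INSTANCE));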

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
index 890aea1..8beb0dd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
@@ -19,19 +19,15 @@ package org.apache.hadoop.hive.ql.optimizer.calcite;
 
 import org.apache.calcite.plan.Context;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.HiveDruidConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 
 
 public class HivePlannerContext implements Context {
   private HiveAlgorithmsConf algoConfig;
-  private HiveDruidConf druidConf;
   private HiveRulesRegistry registry;
 
-  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveDruidConf druidConf,
-          HiveRulesRegistry registry) {
+  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry) {
     this.algoConfig = algoConfig;
-    this.druidConf = druidConf;
     this.registry = registry;
   }
 
@@ -39,9 +35,6 @@ public class HivePlannerContext implements Context {
     if (clazz.isInstance(algoConfig)) {
       return clazz.cast(algoConfig);
     }
-    if (clazz.isInstance(druidConf)) {
-      return clazz.cast(druidConf);
-    }
     if (clazz.isInstance(registry)) {
       return clazz.cast(registry);
     }
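
Note: with HiveDruidConf removed, the method above is the standard Calcite Context.unwrap lookup over the two remaining members. A hedged usage fragment (variable names are illustrative; construction of the arguments is elided):

    // Build the context with the post-patch two-argument constructor.
    HivePlannerContext ctx = new HivePlannerContext(algoConfig, registry);
    HiveAlgorithmsConf algo = ctx.unwrap(HiveAlgorithmsConf.class); // first branch
    HiveRulesRegistry rules = ctx.unwrap(HiveRulesRegistry.class);  // second branch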

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelBuilder.java
index 1c64d64..bc160d8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelBuilder.java
@@ -81,7 +81,7 @@ public class HiveRelBuilder extends RelBuilder {
 
   @Override
   public RelBuilder filter(Iterable<? extends RexNode> predicates) {
-    final RexNode x = HiveRexUtil.simplify(cluster.getRexBuilder(),
+    final RexNode x = RexUtil.simplify(cluster.getRexBuilder(),
             RexUtil.composeConjunction(cluster.getRexBuilder(), predicates, false));
     if (!x.isAlwaysTrue()) {
       final RelNode input = build();
@@ -91,4 +91,20 @@ public class HiveRelBuilder extends RelBuilder {
     return this;
   }
 
+  /**
+   * Empty relationship can be expressed in many different ways, e.g.,
+   * filter(cond=false), empty LogicalValues(), etc. Calcite default implementation
+   * uses empty LogicalValues(); however, currently there is not an equivalent to
+   * this expression in Hive. Thus, we use limit 0, since Hive already includes
+   * optimizations that will do early pruning of the result tree when it is found,
+   * e.g., GlobalLimitOptimizer.
+   */
+  @Override
+  public RelBuilder empty() {
+    final RelNode input = build();
+    final RelNode sort = HiveRelFactories.HIVE_SORT_FACTORY.createSort(
+            input, RelCollations.of(), null, literal(0));
+    return this.push(sort);
+  }
+
 }
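
Note: the new empty() override is the behavioral change here: where Calcite's default RelBuilder emits an empty LogicalValues, Hive replaces the top of the builder stack with a sort whose fetch is 0, i.e. the relational form of LIMIT 0. A hedged fragment showing the intended effect (builder construction elided):

    RelNode emptyRel = relBuilder
        .scan("t")   // any input; the table name here is illustrative
        .empty()     // pops the scan and pushes Sort(collation = [], fetch = 0)
        .build();
    // A fetch of 0 can never produce rows, so optimizations such as
    // GlobalLimitOptimizer can prune the subtree early, as the javadoc notes.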

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index 4c154d0..50fbb78 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -24,7 +24,6 @@ import java.util.List;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
@@ -34,6 +33,7 @@ import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
@@ -314,12 +314,12 @@ public class HiveRelOptUtil extends RelOptUtil {
    *
    * <p>Optimizes if the fields are the identity projection.
    *
-   * @param factory ProjectFactory
+   * @param relBuilder RelBuilder
    * @param child Input relational expression
    * @param posList Source of each projected field
    * @return Relational expression that projects given fields
    */
-  public static RelNode createProject(final RelFactories.ProjectFactory factory,
+  public static RelNode createProject(final RelBuilder relBuilder,
       final RelNode child, final List<Integer> posList) {
     RelDataType rowType = child.getRowType();
     final List<String> fieldNames = rowType.getFieldNames();
@@ -344,7 +344,7 @@ public class HiveRelOptUtil extends RelOptUtil {
             final int pos = posList.get(index);
             return fieldNames.get(pos);
           }
-        }, true, factory);
+        }, true, relBuilder);
   }
 
 }
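
Note: since createProject now takes a RelBuilder instead of a RelFactories.ProjectFactory, call sites pass the builder straight through. A hedged usage fragment (relBuilder and child obtained elsewhere):

    // Project input fields 2 and 0 under their original names; per the
    // javadoc, an identity projection [0..n-1] is returned unchanged.
    RelNode projected = HiveRelOptUtil.createProject(
        relBuilder, child, ImmutableList.of(2, 0));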

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
deleted file mode 100644
index 15707c1..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
+++ /dev/null
@@ -1,821 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.util.ControlFlowException;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
-
-
-public class HiveRexUtil {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(HiveRexUtil.class);
-
-
-  /** Converts an expression to conjunctive normal form (CNF).
-   *
-   * <p>The following expression is in CNF:
-   *
-   * <blockquote>(a OR b) AND (c OR d)</blockquote>
-   *
-   * <p>The following expression is not in CNF:
-   *
-   * <blockquote>(a AND b) OR c</blockquote>
-   *
-   * but can be converted to CNF:
-   *
-   * <blockquote>(a OR c) AND (b OR c)</blockquote>
-   *
-   * <p>The following expression is not in CNF:
-   *
-   * <blockquote>NOT (a OR NOT b)</blockquote>
-   *
-   * but can be converted to CNF by applying de Morgan's theorem:
-   *
-   * <blockquote>NOT a AND b</blockquote>
-   *
-   * <p>Expressions not involving AND, OR or NOT at the top level are in CNF.
-   */
-  public static RexNode toCnf(RexBuilder rexBuilder, RexNode rex) {
-    return new CnfHelper(rexBuilder).toCnf(rex);
-  }
-
-  public static RexNode toCnf(RexBuilder rexBuilder, int maxCNFNodeCount, RexNode rex) {
-    return new CnfHelper(rexBuilder, maxCNFNodeCount).toCnf(rex);
-  }
-
-  /** Helps {@link org.apache.calcite.rex.RexUtil#toCnf}. */
-  private static class CnfHelper {
-    final RexBuilder rexBuilder;
-    int currentCount;
-    final int maxNodeCount;
-
-    private CnfHelper(RexBuilder rexBuilder) {
-      this(rexBuilder, Integer.MAX_VALUE);
-    }
-
-    private CnfHelper(RexBuilder rexBuilder, int maxNodeCount) {
-      this.rexBuilder = rexBuilder;
-      this.maxNodeCount = maxNodeCount == -1 ? Integer.MAX_VALUE : maxNodeCount;
-    }
-
-    public RexNode toCnf(RexNode rex) {
-      try {
-        this.currentCount = 0;
-        return toCnf2(rex);
-      } catch (OverflowError e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Transformation to CNF not carried out as number of resulting nodes "
-                  + "in expression is greater than the max number of nodes allowed");
-        }
-        Util.swallow(e, null);
-        return rex;
-      }
-    }
-
-    private RexNode toCnf2(RexNode rex) {
-      final List<RexNode> operands;
-      switch (rex.getKind()) {
-      case AND:
-        incrementAndCheck();
-        operands = RexUtil.flattenAnd(((RexCall) rex).getOperands());
-        final List<RexNode> cnfOperands = Lists.newArrayList();
-        for (RexNode node : operands) {
-          RexNode cnf = toCnf2(node);
-          switch (cnf.getKind()) {
-          case AND:
-            incrementAndCheck();
-            cnfOperands.addAll(((RexCall) cnf).getOperands());
-            break;
-          default:
-            incrementAndCheck();
-            cnfOperands.add(cnf);
-          }
-        }
-        return and(cnfOperands);
-      case OR:
-        incrementAndCheck();
-        operands = RexUtil.flattenOr(((RexCall) rex).getOperands());
-        final RexNode head = operands.get(0);
-        final RexNode headCnf = toCnf2(head);
-        final List<RexNode> headCnfs = RelOptUtil.conjunctions(headCnf);
-        final RexNode tail = or(Util.skip(operands));
-        final RexNode tailCnf = toCnf2(tail);
-        final List<RexNode> tailCnfs = RelOptUtil.conjunctions(tailCnf);
-        final List<RexNode> list = Lists.newArrayList();
-        for (RexNode h : headCnfs) {
-          for (RexNode t : tailCnfs) {
-            list.add(or(ImmutableList.of(h, t)));
-          }
-        }
-        return and(list);
-      case NOT:
-        final RexNode arg = ((RexCall) rex).getOperands().get(0);
-        switch (arg.getKind()) {
-        case NOT:
-          return toCnf2(((RexCall) arg).getOperands().get(0));
-        case OR:
-          operands = ((RexCall) arg).getOperands();
-          List<RexNode> transformedDisj = new ArrayList<>();
-          for (RexNode input : RexUtil.flattenOr(operands)) {
-            transformedDisj.add(rexBuilder.makeCall(input.getType(), SqlStdOperatorTable.NOT,
-                    ImmutableList.of(input)));
-          }
-          return toCnf2(and(transformedDisj));
-        case AND:
-          operands = ((RexCall) arg).getOperands();
-          List<RexNode> transformedConj = new ArrayList<>();
-          for (RexNode input : RexUtil.flattenAnd(operands)) {
-            transformedConj.add(rexBuilder.makeCall(input.getType(), SqlStdOperatorTable.NOT,
-                    ImmutableList.of(input)));
-          }
-          return toCnf2(or(transformedConj));
-        default:
-          incrementAndCheck();
-          return rex;
-        }
-      default:
-        incrementAndCheck();
-        return rex;
-      }
-    }
-
-    private RexNode and(Iterable<? extends RexNode> nodes) {
-      return RexUtil.composeConjunction(rexBuilder, nodes, false);
-    }
-
-    private RexNode or(Iterable<? extends RexNode> nodes) {
-      return RexUtil.composeDisjunction(rexBuilder, nodes, false);
-    }
-
-    private void incrementAndCheck() {
-      this.currentCount++;
-      if (this.currentCount > this.maxNodeCount) {
-        throw OverflowError.INSTANCE;
-      }
-    }
-
-    @SuppressWarnings("serial")
-    private static class OverflowError extends ControlFlowException {
-
-      public static final OverflowError INSTANCE = new OverflowError();
-
-      private OverflowError() {}
-    }
-  }
-
-
-  /**
-   * Simplifies a boolean expression.
-   *
-   * <p>In particular:</p>
-   * <ul>
-   * <li>{@code simplify(x = 1 AND y = 2 AND NOT x = 1)}
-   * returns {@code y = 2}</li>
-   * <li>{@code simplify(x = 1 AND FALSE)}
-   * returns {@code FALSE}</li>
-   * </ul>
-   */
-  public static RexNode simplify(RexBuilder rexBuilder, RexNode e) {
-    return simplify(rexBuilder, e, false);
-  }
-
-  public static RexNode simplify(RexBuilder rexBuilder, RexNode e,
-          boolean unknownAsFalse) {
-    switch (e.getKind()) {
-    case AND:
-      return simplifyAnd(rexBuilder, (RexCall) e, unknownAsFalse);
-    case OR:
-      return simplifyOr(rexBuilder, (RexCall) e);
-    case NOT:
-      return simplifyNot(rexBuilder, (RexCall) e);
-    case CASE:
-      return simplifyCase(rexBuilder, (RexCall) e, unknownAsFalse);
-    case IS_NULL:
-      return ((RexCall) e).getOperands().get(0).getType().isNullable()
-          ? e : rexBuilder.makeLiteral(false);
-    case IS_NOT_NULL:
-      return ((RexCall) e).getOperands().get(0).getType().isNullable()
-          ? e : rexBuilder.makeLiteral(true);
-    default:
-      return e;
-    }
-  }
-
-  private static RexNode simplifyNot(RexBuilder rexBuilder, RexCall call) {
-    final RexNode a = call.getOperands().get(0);
-    switch (a.getKind()) {
-    case NOT:
-      // NOT NOT x ==> x
-      return simplify(rexBuilder, ((RexCall) a).getOperands().get(0));
-    }
-    final SqlKind negateKind = a.getKind().negate();
-    if (a.getKind() != negateKind) {
-      return simplify(rexBuilder,
-          rexBuilder.makeCall(op(negateKind),
-              ImmutableList.of(((RexCall) a).getOperands().get(0))));
-    }
-    final SqlKind negateKind2 = negate(a.getKind());
-    if (a.getKind() != negateKind2) {
-      return simplify(rexBuilder,
-          rexBuilder.makeCall(op(negateKind2), ((RexCall) a).getOperands()));
-    }
-    if (a.getKind() == SqlKind.AND) {
-      // NOT distributivity for AND
-      final List<RexNode> newOperands = new ArrayList<>();
-      for (RexNode operand : ((RexCall) a).getOperands()) {
-        newOperands.add(simplify(rexBuilder,
-            rexBuilder.makeCall(SqlStdOperatorTable.NOT, operand)));
-      }
-      return simplify(rexBuilder,
-          rexBuilder.makeCall(SqlStdOperatorTable.OR, newOperands));
-    }
-    if (a.getKind() == SqlKind.OR) {
-      // NOT distributivity for OR
-      final List<RexNode> newOperands = new ArrayList<>();
-      for (RexNode operand : ((RexCall) a).getOperands()) {
-        newOperands.add(simplify(rexBuilder,
-            rexBuilder.makeCall(SqlStdOperatorTable.NOT, operand)));
-      }
-      return simplify(rexBuilder,
-          rexBuilder.makeCall(SqlStdOperatorTable.AND, newOperands));
-    }
-    return call;
-  }
-
-  private static RexNode simplifyCase(RexBuilder rexBuilder, RexCall call,
-          boolean unknownAsFalse) {
-    final List<RexNode> operands = call.getOperands();
-    final List<RexNode> newOperands = new ArrayList<>();
-    final Set<String> values = new HashSet<>();
-    for (int i = 0; i < operands.size(); i++) {
-      RexNode operand = operands.get(i);
-      if (RexUtil.isCasePredicate(call, i)) {
-        if (operand.isAlwaysTrue()) {
-          // Predicate is always TRUE. Make value the ELSE and quit.
-          newOperands.add(operands.get(i + 1));
-          if (unknownAsFalse && RexUtil.isNull(operands.get(i + 1))) {
-            values.add(rexBuilder.makeLiteral(false).toString());
-          } else {
-            values.add(operands.get(i + 1).toString());
-          }
-          break;
-        } else if (operand.isAlwaysFalse() || RexUtil.isNull(operand)) {
-          // Predicate is always FALSE or NULL. Skip predicate and value.
-          ++i;
-          continue;
-        }
-      } else {
-        if (unknownAsFalse && RexUtil.isNull(operand)) {
-          values.add(rexBuilder.makeLiteral(false).toString());
-        } else {
-          values.add(operand.toString());
-        }
-      }
-      newOperands.add(operand);
-    }
-    assert newOperands.size() % 2 == 1;
-    if (newOperands.size() == 1 || values.size() == 1) {
-      return rexBuilder.makeCast(call.getType(), newOperands.get(newOperands.size() - 1));
-    }
-  trueFalse:
-    if (call.getType().getSqlTypeName() == SqlTypeName.BOOLEAN) {
-      // Optimize CASE where every branch returns constant true or constant
-      // false.
-      final List<Pair<RexNode, RexNode>> pairs =
-          casePairs(rexBuilder, newOperands);
-      // 1) Possible simplification if unknown is treated as false:
-      //   CASE
-      //   WHEN p1 THEN TRUE
-      //   WHEN p2 THEN TRUE
-      //   ELSE FALSE
-      //   END
-      // can be rewritten to: (p1 or p2)
-      if (unknownAsFalse) {
-        final List<RexNode> terms = new ArrayList<>();
-        int pos = 0;
-        for (; pos < pairs.size(); pos++) {
-          // True block
-          Pair<RexNode, RexNode> pair = pairs.get(pos);
-          if (!pair.getValue().isAlwaysTrue()) {
-            break;
-          }
-          terms.add(pair.getKey());
-        }
-        for (; pos < pairs.size(); pos++) {
-          // False block
-          Pair<RexNode, RexNode> pair = pairs.get(pos);
-          if (!pair.getValue().isAlwaysFalse() && !RexUtil.isNull(pair.getValue())) {
-            break;
-          }
-        }
-        if (pos == pairs.size()) {
-          return RexUtil.composeDisjunction(rexBuilder, terms, false);
-        }
-      }
-      // 2) Another simplification
-      //   CASE
-      //   WHEN p1 THEN TRUE
-      //   WHEN p2 THEN FALSE
-      //   WHEN p3 THEN TRUE
-      //   ELSE FALSE
-      //   END
-      // if p1...pn cannot be nullable
-      for (Ord<Pair<RexNode, RexNode>> pair : Ord.zip(pairs)) {
-        if (pair.e.getKey().getType().isNullable()) {
-          break trueFalse;
-        }
-        if (!pair.e.getValue().isAlwaysTrue()
-            && !pair.e.getValue().isAlwaysFalse()
-            && (!unknownAsFalse || !RexUtil.isNull(pair.e.getValue()))) {
-          break trueFalse;
-        }
-      }
-      final List<RexNode> terms = new ArrayList<>();
-      final List<RexNode> notTerms = new ArrayList<>();
-      for (Ord<Pair<RexNode, RexNode>> pair : Ord.zip(pairs)) {
-        if (pair.e.getValue().isAlwaysTrue()) {
-          terms.add(RexUtil.andNot(rexBuilder, pair.e.getKey(), notTerms));
-        } else {
-          notTerms.add(pair.e.getKey());
-        }
-      }
-      return RexUtil.composeDisjunction(rexBuilder, terms, false);
-    }
-    if (newOperands.equals(operands)) {
-      return call;
-    }
-    return call.clone(call.getType(), newOperands);
-  }
-
-  /** Given "CASE WHEN p1 THEN v1 ... ELSE e END"
-   * returns [(p1, v1), ..., (true, e)]. */
-  private static List<Pair<RexNode, RexNode>> casePairs(RexBuilder rexBuilder,
-      List<RexNode> operands) {
-    final ImmutableList.Builder<Pair<RexNode, RexNode>> builder =
-        ImmutableList.builder();
-    for (int i = 0; i < operands.size() - 1; i += 2) {
-      builder.add(Pair.of(operands.get(i), operands.get(i + 1)));
-    }
-    builder.add(
-        Pair.of((RexNode) rexBuilder.makeLiteral(true), Util.last(operands)));
-    return builder.build();
-  }
-
-  public static RexNode simplifyAnd(RexBuilder rexBuilder, RexCall e,
-          boolean unknownAsFalse) {
-    final List<RexNode> terms = new ArrayList<>();
-    final List<RexNode> notTerms = new ArrayList<>();
-    RelOptUtil.decomposeConjunction(e, terms, notTerms);
-    if (unknownAsFalse) {
-      return simplifyAnd2ForUnknownAsFalse(rexBuilder, terms, notTerms);
-    }
-    return simplifyAnd2(rexBuilder, terms, notTerms);
-  }
-
-  public static RexNode simplifyAnd2(RexBuilder rexBuilder,
-      List<RexNode> terms, List<RexNode> notTerms) {
-    for (RexNode term : terms) {
-      if (term.isAlwaysFalse()) {
-        return rexBuilder.makeLiteral(false);
-      }
-    }
-    if (terms.isEmpty() && notTerms.isEmpty()) {
-      return rexBuilder.makeLiteral(true);
-    }
-    if (terms.size() == 1 && notTerms.isEmpty()) {
-      // Make sure "x OR y OR x" (a single-term conjunction) gets simplified.
-      return simplify(rexBuilder, terms.get(0));
-    }
-    // If one of the not-disjunctions is a disjunction that is wholly
-    // contained in the disjunctions list, the expression is not
-    // satisfiable.
-    //
-    // Example #1. x AND y AND z AND NOT (x AND y)  - not satisfiable
-    // Example #2. x AND y AND NOT (x AND y)        - not satisfiable
-    // Example #3. x AND y AND NOT (x AND y AND z)  - may be satisfiable
-    for (RexNode notDisjunction : notTerms) {
-      final List<RexNode> terms2 = RelOptUtil.conjunctions(notDisjunction);
-      if (terms.containsAll(terms2)) {
-        return rexBuilder.makeLiteral(false);
-      }
-    }
-    // Add the NOT disjunctions back in.
-    for (RexNode notDisjunction : notTerms) {
-      terms.add(
-          simplify(rexBuilder,
-              rexBuilder.makeCall(SqlStdOperatorTable.NOT, notDisjunction)));
-    }
-    return RexUtil.composeConjunction(rexBuilder, terms, false);
-  }
-
-  /** As {@link #simplifyAnd2(RexBuilder, List, List)} but we assume that if the expression returns
-   * UNKNOWN it will be interpreted as FALSE. */
-  public static RexNode simplifyAnd2ForUnknownAsFalse(RexBuilder rexBuilder,
-      List<RexNode> terms, List<RexNode> notTerms) {
-    for (RexNode term : terms) {
-      if (term.isAlwaysFalse()) {
-        return rexBuilder.makeLiteral(false);
-      }
-    }
-    if (terms.isEmpty() && notTerms.isEmpty()) {
-      return rexBuilder.makeLiteral(true);
-    }
-    if (terms.size() == 1 && notTerms.isEmpty()) {
-      // Make sure "x OR y OR x" (a single-term conjunction) gets simplified.
-      return simplify(rexBuilder, terms.get(0), true);
-    }
-    // Try to simplify the expression
-    final Multimap<String,Pair<String,RexNode>> equalityTerms = ArrayListMultimap.create();
-    final Map<String,String> equalityConstantTerms = new HashMap<>();
-    final Set<String> negatedTerms = new HashSet<>();
-    final Set<String> nullOperands = new HashSet<>();
-    final Set<RexNode> notNullOperands = new LinkedHashSet<>();
-    final Set<String> comparedOperands = new HashSet<>();
-    for (int i = 0; i < terms.size(); i++) {
-      RexNode term = terms.get(i);
-      if (!HiveCalciteUtil.isDeterministic(term)) {
-        continue;
-      }
-      // Simplify BOOLEAN expressions if possible
-      while (term.getKind() == SqlKind.EQUALS) {
-        RexCall call = (RexCall) term;
-        if (call.getOperands().get(0).isAlwaysTrue()) {
-          term = call.getOperands().get(1);
-          terms.remove(i);
-          terms.add(i, term);
-          continue;
-        } else if (call.getOperands().get(1).isAlwaysTrue()) {
-          term = call.getOperands().get(0);
-          terms.remove(i);
-          terms.add(i, term);
-          continue;
-        }
-        break;
-      }
-      switch (term.getKind()) {
-      case EQUALS:
-      case NOT_EQUALS:
-      case LESS_THAN:
-      case GREATER_THAN:
-      case LESS_THAN_OR_EQUAL:
-      case GREATER_THAN_OR_EQUAL:
-        RexCall call = (RexCall) term;
-        RexNode left = call.getOperands().get(0);
-        comparedOperands.add(left.toString());
-        RexCall leftCast = null;
-        // if it is a cast, we include the inner reference
-        if (left.getKind() == SqlKind.CAST) {
-          leftCast = (RexCall) left;
-          comparedOperands.add(leftCast.getOperands().get(0).toString());
-        }
-        RexNode right = call.getOperands().get(1);
-        comparedOperands.add(right.toString());
-        RexCall rightCast = null;
-        // if it is a cast, we include the inner reference
-        if (right.getKind() == SqlKind.CAST) {
-          rightCast = (RexCall) right;
-          comparedOperands.add(rightCast.getOperands().get(0).toString());
-        }
-        // Check for equality on different constants. If the same ref or CAST(ref)
-        // is equal to different constants, this condition cannot be satisfied,
-        // and hence it can be evaluated to FALSE
-        if (term.getKind() == SqlKind.EQUALS) {
-          boolean leftRef = left instanceof RexInputRef ||
-                  (leftCast != null && leftCast.getOperands().get(0) instanceof RexInputRef);
-          boolean rightRef = right instanceof RexInputRef ||
-                  (rightCast != null && rightCast.getOperands().get(0) instanceof RexInputRef);
-          if (right instanceof RexLiteral && leftRef) {
-            final String literal = right.toString();
-            final String prevLiteral = equalityConstantTerms.put(left.toString(), literal);
-            if (prevLiteral != null && !literal.equals(prevLiteral)) {
-              return rexBuilder.makeLiteral(false);
-            }
-          } else if (left instanceof RexLiteral && rightRef) {
-            final String literal = left.toString();
-            final String prevLiteral = equalityConstantTerms.put(right.toString(), literal);
-            if (prevLiteral != null && !literal.equals(prevLiteral)) {
-              return rexBuilder.makeLiteral(false);
-            }
-          } else if (leftRef && rightRef) {
-            equalityTerms.put(left.toString(), Pair.of(right.toString(), term));
-          }
-        }
-        // Assume the expression a > 5 is part of a Filter condition.
-        // Then we can derive the negated term: a <= 5.
-        // But as the comparison is string-based and thus dependent on operand order,
-        // we should also add the inverted negated term: 5 >= a.
-        // Observe that for creating the inverted term we invert the list of operands.
-        RexNode negatedTerm = negate(rexBuilder, call);
-        if (negatedTerm != null) {
-          negatedTerms.add(negatedTerm.toString());
-          RexNode invertNegatedTerm = invert(rexBuilder, (RexCall) negatedTerm);
-          if (invertNegatedTerm != null) {
-            negatedTerms.add(invertNegatedTerm.toString());
-          }
-        }
-        break;
-      case IN:
-        comparedOperands.add(((RexCall) term).operands.get(0).toString());
-        break;
-      case BETWEEN:
-        comparedOperands.add(((RexCall) term).operands.get(1).toString());
-        break;
-      case IS_NOT_NULL:
-        notNullOperands.add(((RexCall) term).getOperands().get(0));
-        terms.remove(i);
-        --i;
-        break;
-      case IS_NULL:
-        nullOperands.add(((RexCall) term).getOperands().get(0).toString());
-      }
-    }
-    // If a column is required to be NULL but also appears in a comparison
-    // predicate, the expression is not satisfiable.
-    // Example. IS NULL(x) AND x < 5  - not satisfiable
-    if (!Collections.disjoint(nullOperands, comparedOperands)) {
-      return rexBuilder.makeLiteral(false);
-    }
-    // Check for equality of two refs wrt equality with constants
-    // Example #1. x=5 AND y=5 AND x=y : x=5 AND y=5
-    // Example #2. x=5 AND y=6 AND x=y - not satisfiable
-    for (String ref1 : equalityTerms.keySet()) {
-      final String literal1 = equalityConstantTerms.get(ref1);
-      if (literal1 == null) {
-        continue;
-      }
-      Collection<Pair<String, RexNode>> references = equalityTerms.get(ref1);
-      for (Pair<String,RexNode> ref2 : references) {
-        final String literal2 = equalityConstantTerms.get(ref2.left);
-        if (literal2 == null) {
-          continue;
-        }
-        if (!literal1.equals(literal2)) {
-          // If an expression is equal to two different constants,
-          // it is not satisfiable
-          return rexBuilder.makeLiteral(false);
-        }
-        // Otherwise we can remove the term, as we already know that
-        // the expression is equal to two constants
-        terms.remove(ref2.right);
-      }
-    }
-    // Remove unnecessary IS NOT NULL expressions.
-    //
-    // Example. IS NOT NULL(x) AND x < 5  : x < 5
-    for (RexNode operand : notNullOperands) {
-      if (!comparedOperands.contains(operand.toString())) {
-        terms.add(
-            rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, operand));
-      }
-    }
-    // If one of the not-disjunctions is a disjunction that is wholly
-    // contained in the disjunctions list, the expression is not
-    // satisfiable.
-    //
-    // Example #1. x AND y AND z AND NOT (x AND y)  - not satisfiable
-    // Example #2. x AND y AND NOT (x AND y)        - not satisfiable
-    // Example #3. x AND y AND NOT (x AND y AND z)  - may be satisfiable
-    final Set<String> termsSet = new HashSet<String>(
-            Lists.transform(terms, HiveCalciteUtil.REX_STR_FN));
-    for (RexNode notDisjunction : notTerms) {
-      if (!HiveCalciteUtil.isDeterministic(notDisjunction)) {
-        continue;
-      }
-      final List<String> terms2Set = Lists.transform(
-              RelOptUtil.conjunctions(notDisjunction), HiveCalciteUtil.REX_STR_FN);
-      if (termsSet.containsAll(terms2Set)) {
-        return rexBuilder.makeLiteral(false);
-      }
-    }
-    // Add the NOT disjunctions back in.
-    for (RexNode notDisjunction : notTerms) {
-      terms.add(
-          simplify(rexBuilder,
-              rexBuilder.makeCall(SqlStdOperatorTable.NOT, notDisjunction), true));
-    }
-    // The negated terms: only deterministic expressions
-    for (String negatedTerm : negatedTerms) {
-      if (termsSet.contains(negatedTerm)) {
-        return rexBuilder.makeLiteral(false);
-      }
-    }
-    return RexUtil.composeConjunction(rexBuilder, terms, false);
-  }
-
-  /** Simplifies OR(x, x) into x, and similar. */
-  public static RexNode simplifyOr(RexBuilder rexBuilder, RexCall call) {
-    assert call.getKind() == SqlKind.OR;
-    final List<RexNode> terms = RelOptUtil.disjunctions(call);
-    for (int i = 0; i < terms.size(); i++) {
-      final RexNode term = terms.get(i);
-      switch (term.getKind()) {
-      case LITERAL:
-        if (!RexLiteral.isNullLiteral(term)) {
-          if (RexLiteral.booleanValue(term)) {
-            return term; // true
-          } else {
-            terms.remove(i);
-            --i;
-          }
-        }
-      }
-    }
-    return RexUtil.composeDisjunction(rexBuilder, terms, false);
-  }
-
-  private static RexCall negate(RexBuilder rexBuilder, RexCall call) {
-    switch (call.getKind()) {
-      case EQUALS:
-      case NOT_EQUALS:
-      case LESS_THAN:
-      case GREATER_THAN:
-      case LESS_THAN_OR_EQUAL:
-      case GREATER_THAN_OR_EQUAL:
-        return (RexCall) rexBuilder.makeCall(op(negate(call.getKind())), call.getOperands());
-    }
-    return null;
-  }
-
-  private static SqlKind negate(SqlKind kind) {
-    switch (kind) {
-      case EQUALS:
-        return SqlKind.NOT_EQUALS;
-      case NOT_EQUALS:
-        return SqlKind.EQUALS;
-      case LESS_THAN:
-        return SqlKind.GREATER_THAN_OR_EQUAL;
-      case GREATER_THAN:
-        return SqlKind.LESS_THAN_OR_EQUAL;
-      case LESS_THAN_OR_EQUAL:
-        return SqlKind.GREATER_THAN;
-      case GREATER_THAN_OR_EQUAL:
-        return SqlKind.LESS_THAN;
-    }
-    return kind;
-  }
-
-  private static RexCall invert(RexBuilder rexBuilder, RexCall call) {
-    switch (call.getKind()) {
-      case EQUALS:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
-                Lists.reverse(call.getOperands()));
-      case NOT_EQUALS:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.NOT_EQUALS,
-                Lists.reverse(call.getOperands()));
-      case LESS_THAN:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN,
-                Lists.reverse(call.getOperands()));
-      case GREATER_THAN:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN,
-                Lists.reverse(call.getOperands()));
-      case LESS_THAN_OR_EQUAL:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
-                Lists.reverse(call.getOperands()));
-      case GREATER_THAN_OR_EQUAL:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
-                Lists.reverse(call.getOperands()));
-    }
-    return null;
-  }
-
-  private static SqlOperator op(SqlKind kind) {
-    switch (kind) {
-    case IS_FALSE:
-      return SqlStdOperatorTable.IS_FALSE;
-    case IS_TRUE:
-      return SqlStdOperatorTable.IS_TRUE;
-    case IS_UNKNOWN:
-      return SqlStdOperatorTable.IS_UNKNOWN;
-    case IS_NULL:
-      return SqlStdOperatorTable.IS_NULL;
-    case IS_NOT_FALSE:
-      return SqlStdOperatorTable.IS_NOT_FALSE;
-    case IS_NOT_TRUE:
-      return SqlStdOperatorTable.IS_NOT_TRUE;
-    case IS_NOT_NULL:
-      return SqlStdOperatorTable.IS_NOT_NULL;
-    case EQUALS:
-      return SqlStdOperatorTable.EQUALS;
-    case NOT_EQUALS:
-      return SqlStdOperatorTable.NOT_EQUALS;
-    case LESS_THAN:
-      return SqlStdOperatorTable.LESS_THAN;
-    case GREATER_THAN:
-      return SqlStdOperatorTable.GREATER_THAN;
-    case LESS_THAN_OR_EQUAL:
-      return SqlStdOperatorTable.LESS_THAN_OR_EQUAL;
-    case GREATER_THAN_OR_EQUAL:
-      return SqlStdOperatorTable.GREATER_THAN_OR_EQUAL;
-    default:
-      throw new AssertionError(kind);
-    }
-  }
-
-  public static SqlKind invert(SqlKind kind) {
-    switch (kind) {
-      case EQUALS:
-        return SqlKind.EQUALS;
-      case NOT_EQUALS:
-        return SqlKind.NOT_EQUALS;
-      case LESS_THAN:
-        return SqlKind.GREATER_THAN;
-      case GREATER_THAN:
-        return SqlKind.LESS_THAN;
-      case LESS_THAN_OR_EQUAL:
-        return SqlKind.GREATER_THAN_OR_EQUAL;
-      case GREATER_THAN_OR_EQUAL:
-        return SqlKind.LESS_THAN_OR_EQUAL;
-    }
-    return null;
-  }
-
-  public static class ExprSimplifier extends RexShuttle {
-    private final RexBuilder rexBuilder;
-    private final boolean unknownAsFalse;
-    private final Map<RexNode,Boolean> unknownAsFalseMap;
-
-    public ExprSimplifier(RexBuilder rexBuilder, boolean unknownAsFalse) {
-      this.rexBuilder = rexBuilder;
-      this.unknownAsFalse = unknownAsFalse;
-      this.unknownAsFalseMap = new HashMap<>();
-    }
-
-    @Override
-    public RexNode visitCall(RexCall call) {
-      Boolean unknownAsFalseCall = unknownAsFalse;
-      if (unknownAsFalseCall) {
-        switch (call.getKind()) {
-        case AND:
-        case CASE:
-          unknownAsFalseCall = this.unknownAsFalseMap.get(call);
-          if (unknownAsFalseCall == null) {
-            // Top operator
-            unknownAsFalseCall = true;
-          }
-          break;
-        default:
-          unknownAsFalseCall = false;
-        }
-        for (RexNode operand : call.operands) {
-          this.unknownAsFalseMap.put(operand, unknownAsFalseCall);
-        }
-      }
-      RexNode node = super.visitCall(call);
-      RexNode simplifiedNode = HiveRexUtil.simplify(rexBuilder, node, unknownAsFalseCall);
-      if (node == simplifiedNode) {
-        return node;
-      }
-      if (simplifiedNode.getType().equals(call.getType())) {
-        return simplifiedNode;
-      }
-      return rexBuilder.makeCast(call.getType(), simplifiedNode, true);
-    }
-  }
-
-}

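The deleted HiveRexUtil above mirrors Calcite's own RexUtil.toCnf, adding a maxNodeCount guard that aborts the rewrite when the OR-over-AND distribution (|headCnfs| x |tailCnfs| new nodes per OR) would blow up. A minimal standalone sketch of the same CNF rewrite, assuming only Calcite on the classpath; the class name and column indexes are illustrative and not Hive code:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class CnfSketch {
  public static void main(String[] args) {
    RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    RelDataType boolType =
        rexBuilder.getTypeFactory().createSqlType(SqlTypeName.BOOLEAN);
    RexNode a = rexBuilder.makeInputRef(boolType, 0);
    RexNode b = rexBuilder.makeInputRef(boolType, 1);
    RexNode c = rexBuilder.makeInputRef(boolType, 2);
    // (a AND b) OR c -- not in CNF
    RexNode expr = rexBuilder.makeCall(SqlStdOperatorTable.OR,
        rexBuilder.makeCall(SqlStdOperatorTable.AND, a, b), c);
    // Distributes OR over AND: prints AND(OR($0, $2), OR($1, $2))
    System.out.println(RexUtil.toCnf(rexBuilder, expr));
  }
}

Run directly, this reproduces the Javadoc example above: (a AND b) OR c becomes (a OR c) AND (b OR c).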
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
index 10fdcc6..279d101 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
@@ -36,8 +36,19 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
     switch (typeName) {
     case DECIMAL:
       return getMaxNumericScale();
-    case INTERVAL_DAY_TIME:
+    case INTERVAL_YEAR:
+    case INTERVAL_MONTH:
     case INTERVAL_YEAR_MONTH:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_SECOND:
       return SqlTypeName.MAX_INTERVAL_FRACTIONAL_SECOND_PRECISION;
     default:
       return -1;
@@ -58,8 +69,19 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
       return getMaxPrecision(typeName);
     case DECIMAL:
       return DEFAULT_DECIMAL_PRECISION;
-    case INTERVAL_DAY_TIME:
+    case INTERVAL_YEAR:
+    case INTERVAL_MONTH:
     case INTERVAL_YEAR_MONTH:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_SECOND:
       return SqlTypeName.DEFAULT_INTERVAL_START_PRECISION;
     default:
       return -1;
@@ -81,8 +103,19 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
     case TIME:
     case TIMESTAMP:
       return MAX_TIMESTAMP_PRECISION;
-    case INTERVAL_DAY_TIME:
+    case INTERVAL_YEAR:
+    case INTERVAL_MONTH:
     case INTERVAL_YEAR_MONTH:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_SECOND:
       return SqlTypeName.MAX_INTERVAL_START_PRECISION;
     default:
       return -1;

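The same thirteen-way interval case list is repeated in all three hunks above because newer Calcite releases split the old INTERVAL_DAY_TIME/INTERVAL_YEAR_MONTH pair into fine-grained interval kinds. If a helper were preferred over the repeated cases, Calcite also exposes the set as a constant; a sketch under the assumption that SqlTypeName.INTERVAL_TYPES is available in the Calcite version in use:

import org.apache.calcite.sql.type.SqlTypeName;

public final class IntervalTypeCheck {
  // Membership test equivalent to the thirteen INTERVAL_* cases above;
  // SqlTypeName.INTERVAL_TYPES spans INTERVAL_YEAR through INTERVAL_SECOND.
  static boolean isIntervalType(SqlTypeName typeName) {
    return SqlTypeName.INTERVAL_TYPES.contains(typeName);
  }

  public static void main(String[] args) {
    System.out.println(isIntervalType(SqlTypeName.INTERVAL_DAY_MINUTE)); // true
    System.out.println(isIntervalType(SqlTypeName.DECIMAL));             // false
  }
}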
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
index badb8ca..40f2cef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveDefaultCostModel.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.cost;
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.RelDistributions;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
@@ -92,12 +93,12 @@ public class HiveDefaultCostModel extends HiveCostModel {
 
     @Override
     public ImmutableList<RelCollation> getCollation(HiveJoin join) {
-      return null;
+      return ImmutableList.of();
     }
 
     @Override
     public RelDistribution getDistribution(HiveJoin join) {
-      return null;
+      return RelDistributions.SINGLETON;
     }
 
     @Override
@@ -117,7 +118,7 @@ public class HiveDefaultCostModel extends HiveCostModel {
 
     @Override
     public Integer getSplitCount(HiveJoin join) {
-      return null;
+      return 1;
     }
   }
 

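The three hunks above replace null returns with neutral defaults: an empty collation list, a singleton distribution, and a split count of 1. The payoff is the usual null-object one, since metadata consumers can then iterate or compare unconditionally. A tiny self-contained illustration of that convention; the names here are hypothetical and are neither Hive nor Calcite API:

import com.google.common.collect.ImmutableList;
import java.util.List;

public final class NullObjectSketch {
  // Old style: absent metadata signalled with null.
  static List<String> collationsNullable() { return null; }
  // New style, as in the hunks above: absent metadata is an empty list.
  static List<String> collationsEmpty() { return ImmutableList.of(); }

  public static void main(String[] args) {
    for (String c : collationsEmpty()) { // safe: body simply never runs
      System.out.println(c);
    }
    // for (String c : collationsNullable()) {} // would throw NullPointerException
  }
}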
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
index ed45ab3..cbea307 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveRelMdCost.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.cost;
 
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.metadata.BuiltInMetadata;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.MetadataDef;
+import org.apache.calcite.rel.metadata.MetadataHandler;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMdPercentageOriginalRows;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
@@ -34,7 +37,7 @@ import com.google.common.collect.ImmutableList;
 /**
  * HiveRelMdCost supplies the implementation of cost model.
  */
-public class HiveRelMdCost {
+public class HiveRelMdCost implements MetadataHandler<BuiltInMetadata.NonCumulativeCost> {
 
   private final HiveCostModel hiveCostModel;
 
@@ -50,6 +53,11 @@ public class HiveRelMdCost {
                    RelMdPercentageOriginalRows.SOURCE));
   }
 
+  @Override
+  public MetadataDef<BuiltInMetadata.NonCumulativeCost> getDef() {
+    return BuiltInMetadata.NonCumulativeCost.DEF;
+  }
+
   public RelOptCost getNonCumulativeCost(HiveAggregate aggregate, RelMetadataQuery mq) {
     return hiveCostModel.getAggregateCost(aggregate);
   }

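Implementing MetadataHandler<BuiltInMetadata.NonCumulativeCost> and supplying getDef(), as the hunks above do, is what lets Calcite's metadata dispatcher bind the getNonCumulativeCost overloads to the NonCumulativeCost metadata kind. On the consumer side the cost is then reached through RelMetadataQuery; a hedged sketch, assuming a planner context that supplies a RelNode from a cluster on which the provider is installed:

import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

public final class CostLookupSketch {
  // Dispatches to the matching getNonCumulativeCost(...) overload registered
  // via HiveRelMdCost.getMetadataProvider(); `rel` must come from a cluster
  // on which that provider is installed.
  static RelOptCost nonCumulativeCost(RelNode rel) {
    RelMetadataQuery mq = RelMetadataQuery.instance();
    return mq.getNonCumulativeCost(rel);
  }
}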
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidIntervalUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidIntervalUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidIntervalUtils.java
deleted file mode 100644
index 82ab4d7..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/druid/DruidIntervalUtils.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.druid;
-
-import java.sql.Timestamp;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.commons.lang.StringUtils;
-import org.joda.time.Interval;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Function;
-import com.google.common.collect.BoundType;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Range;
-import com.google.common.collect.Sets;
-
-/** 
- * Utilities for generating intervals from RexNode.
- * 
- * Based on Navis' logic, implemented on Hive data structures.
- * See <a href="https://github.com/druid-io/druid/pull/2880">Druid PR-2880</a>
- * 
- */
-@SuppressWarnings({"rawtypes","unchecked"})
-public class DruidIntervalUtils {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(DruidIntervalUtils.class);
-
-
-  /**
-   * Given a list of predicates, it generates the equivalent Interval
-   * (if possible). It assumes that all the predicates in the input
-   * reference a single column: the timestamp column.
-   * 
-   * @param conjs list of conditions to use for the transformation
-   * @return list of intervals representing the conditions in the input list
-   */
-  public static List<Interval> createInterval(RelDataType type, List<RexNode> conjs) {
-    List<Range> ranges = new ArrayList<>();
-    for (RexNode child : conjs) {
-      List<Range> extractedRanges = extractRanges(type, child, false);
-      if (extractedRanges == null || extractedRanges.isEmpty()) {
-        // We could not extract, we bail out
-        return null;
-      }
-      if (ranges.isEmpty()) {
-        ranges.addAll(extractedRanges);
-        continue;
-      }
-      List<Range> overlapped = Lists.newArrayList();
-      for (Range current : ranges) {
-        for (Range interval : extractedRanges) {
-          if (current.isConnected(interval)) {
-            overlapped.add(current.intersection(interval));
-          }
-        }
-      }
-      ranges = overlapped;
-    }
-    List<Range> compactRanges = condenseRanges(ranges);
-    LOG.debug("Inferred ranges on interval : " + compactRanges);
-    return toInterval(compactRanges);
-  }
-
-  protected static List<Interval> toInterval(List<Range> ranges) {
-    List<Interval> intervals = Lists.transform(ranges, new Function<Range, Interval>() {
-      @Override
-      public Interval apply(Range range) {
-        if (!range.hasLowerBound() && !range.hasUpperBound()) {
-          return DruidTable.DEFAULT_INTERVAL;
-        }
-        long start = range.hasLowerBound() ? toLong(range.lowerEndpoint()) :
-          DruidTable.DEFAULT_INTERVAL.getStartMillis();
-        long end = range.hasUpperBound() ? toLong(range.upperEndpoint()) :
-          DruidTable.DEFAULT_INTERVAL.getEndMillis();
-        if (range.hasLowerBound() && range.lowerBoundType() == BoundType.OPEN) {
-          start++;
-        }
-        if (range.hasUpperBound() && range.upperBoundType() == BoundType.CLOSED) {
-          end++;
-        }
-        return new Interval(start, end);
-      }
-    });
-    LOG.info("Converted time ranges " + ranges + " to interval " + intervals);
-    return intervals;
-  }
-
-  protected static List<Range> extractRanges(RelDataType type, RexNode node,
-          boolean withNot) {
-    switch (node.getKind()) {
-      case EQUALS:
-      case LESS_THAN:
-      case LESS_THAN_OR_EQUAL:
-      case GREATER_THAN:
-      case GREATER_THAN_OR_EQUAL:
-      case BETWEEN:
-      case IN:
-        return leafToRanges(type, (RexCall) node, withNot);
-
-      case NOT:
-        return extractRanges(type, ((RexCall) node).getOperands().get(0), !withNot);
-
-      case OR:
-        RexCall call = (RexCall) node;
-        List<Range> intervals = Lists.newArrayList();
-        for (RexNode child : call.getOperands()) {
-          List<Range> extracted = extractRanges(type, child, withNot);
-          if (extracted != null) {
-            intervals.addAll(extracted);
-          }
-        }
-        return intervals;
-
-      default:
-        return null;
-    }
-  }
-
-  protected static List<Range> leafToRanges(RelDataType type, RexCall call,
-          boolean withNot) {
-    switch (call.getKind()) {
-      case EQUALS:
-      case LESS_THAN:
-      case LESS_THAN_OR_EQUAL:
-      case GREATER_THAN:
-      case GREATER_THAN_OR_EQUAL:
-      {
-        RexLiteral literal = null;
-        if (call.getOperands().get(0) instanceof RexInputRef &&
-                call.getOperands().get(1) instanceof RexLiteral) {
-          literal = extractLiteral(call.getOperands().get(1));
-        } else if (call.getOperands().get(0) instanceof RexInputRef &&
-                call.getOperands().get(1).getKind() == SqlKind.CAST) {
-          literal = extractLiteral(call.getOperands().get(1));
-        } else if (call.getOperands().get(1) instanceof RexInputRef &&
-                call.getOperands().get(0) instanceof RexLiteral) {
-          literal = extractLiteral(call.getOperands().get(0));
-        } else if (call.getOperands().get(1) instanceof RexInputRef &&
-                call.getOperands().get(0).getKind() == SqlKind.CAST) {
-          literal = extractLiteral(call.getOperands().get(0));
-        }
-        if (literal == null) {
-          return null;
-        }
-        Comparable value = literalToType(literal, type);
-        if (value == null) {
-          return null;
-        }
-        if (call.getKind() == SqlKind.LESS_THAN) {
-          return Arrays.<Range> asList(withNot ? Range.atLeast(value) : Range.lessThan(value));
-        } else if (call.getKind() == SqlKind.LESS_THAN_OR_EQUAL) {
-          return Arrays.<Range> asList(withNot ? Range.greaterThan(value) : Range.atMost(value));
-        } else if (call.getKind() == SqlKind.GREATER_THAN) {
-          return Arrays.<Range> asList(withNot ? Range.atMost(value) : Range.greaterThan(value));
-        } else if (call.getKind() == SqlKind.GREATER_THAN_OR_EQUAL) {
-          return Arrays.<Range> asList(withNot ? Range.lessThan(value) : Range.atLeast(value));
-        } else { //EQUALS
-          if (!withNot) {
-            return Arrays.<Range> asList(Range.closed(value, value));
-          }
-          return Arrays.<Range> asList(Range.lessThan(value), Range.greaterThan(value));
-        }
-      }
-      case BETWEEN:
-      {
-        RexLiteral literal1 = extractLiteral(call.getOperands().get(2));
-        if (literal1 == null) {
-          return null;
-        }
-        RexLiteral literal2 = extractLiteral(call.getOperands().get(3));
-        if (literal2 == null) {
-          return null;
-        }
-        Comparable value1 = literalToType(literal1, type);
-        Comparable value2 = literalToType(literal2, type);
-        if (value1 == null || value2 == null) {
-          return null;
-        }
-        boolean inverted = value1.compareTo(value2) > 0;
-        if (!withNot) {
-          return Arrays.<Range> asList(
-                  inverted ? Range.closed(value2, value1) : Range.closed(value1, value2));
-        }
-        return Arrays.<Range> asList(Range.lessThan(inverted ? value2 : value1),
-                Range.greaterThan(inverted ? value1 : value2));
-      }
-      case IN:
-      {
-        List<Range> ranges = Lists.newArrayList();
-        for (int i = 1; i < call.getOperands().size(); i++) {
-          RexLiteral literal = extractLiteral(call.getOperands().get(i));
-          if (literal == null) {
-            return null;
-          }
-          Comparable element = literalToType(literal, type);
-          if (element == null) {
-            return null;
-          }
-          if (withNot) {
-            ranges.addAll(
-                    Arrays.<Range> asList(Range.lessThan(element), Range.greaterThan(element)));
-          } else {
-            ranges.add(Range.closed(element, element));
-          }
-        }
-        return ranges;
-      }
-      default:
-        return null;
-    }
-  }
-
-  @SuppressWarnings("incomplete-switch")
-  protected static Comparable literalToType(RexLiteral literal, RelDataType type) {
-    // Extract
-    Object value = null;
-    switch (literal.getType().getSqlTypeName()) {
-      case DATE:
-      case TIME:
-      case TIMESTAMP:
-      case INTERVAL_YEAR_MONTH:
-      case INTERVAL_DAY_TIME:
-        value = literal.getValue();
-        break;
-      case TINYINT:
-      case SMALLINT:
-      case INTEGER:
-      case BIGINT:
-      case DOUBLE:
-      case DECIMAL:
-      case FLOAT:
-      case REAL:
-      case VARCHAR:
-      case CHAR:
-      case BOOLEAN:
-        value = literal.getValue3();
-    }
-    if (value == null) {
-      return null;
-    }
-
-    // Convert
-    switch (type.getSqlTypeName()) {
-      case BIGINT:
-        return toLong(value);
-      case INTEGER:
-        return toInt(value);
-      case FLOAT:
-        return toFloat(value);
-      case DOUBLE:
-        return toDouble(value);
-      case VARCHAR:
-      case CHAR:
-        return String.valueOf(value);
-      case TIMESTAMP:
-        return toTimestamp(value);
-    }
-    return null;
-  }
-
-  private static RexLiteral extractLiteral(RexNode node) {
-    RexNode target = node;
-    if (node.getKind() == SqlKind.CAST) {
-      target = ((RexCall)node).getOperands().get(0);
-    }
-    if (!(target instanceof RexLiteral)) {
-      return null;
-    }
-    return (RexLiteral) target;
-  }
-
-  private static Comparable toTimestamp(Object literal) {
-    if (literal instanceof Timestamp) {
-      return (Timestamp) literal;
-    }
-    if (literal instanceof Date) {
-      return new Timestamp(((Date) literal).getTime());
-    }
-    if (literal instanceof Number) {
-      return new Timestamp(((Number) literal).longValue());
-    }
-    if (literal instanceof String) {
-      String string = (String) literal;
-      if (StringUtils.isNumeric(string)) {
-        return new Timestamp(Long.valueOf(string));
-      }
-      try {
-        return Timestamp.valueOf(string);
-      } catch (NumberFormatException e) {
-        // ignore
-      }
-    }
-    return null;
-  }
-
-  private static Long toLong(Object literal) {
-    if (literal instanceof Number) {
-      return ((Number) literal).longValue();
-    }
-    if (literal instanceof Date) {
-      return ((Date) literal).getTime();
-    }
-    if (literal instanceof Timestamp) {
-      return ((Timestamp) literal).getTime();
-    }
-    if (literal instanceof String) {
-      try {
-        return Long.valueOf((String) literal);
-      } catch (NumberFormatException e) {
-        // ignore
-      }
-      try {
-        return DateFormat.getDateInstance().parse((String) literal).getTime();
-      } catch (ParseException e) {
-        // best effort. ignore
-      }
-    }
-    return null;
-  }
-
-
-  private static Integer toInt(Object literal) {
-    if (literal instanceof Number) {
-      return ((Number) literal).intValue();
-    }
-    if (literal instanceof String) {
-      try {
-        return Integer.valueOf((String) literal);
-      } catch (NumberFormatException e) {
-        // ignore
-      }
-    }
-    return null;
-  }
-
-  private static Float toFloat(Object literal) {
-    if (literal instanceof Number) {
-      return ((Number) literal).floatValue();
-    }
-    if (literal instanceof String) {
-      try {
-        return Float.valueOf((String) literal);
-      } catch (NumberFormatException e) {
-        // ignore
-      }
-    }
-    return null;
-  }
-
-  private static Double toDouble(Object literal) {
-    if (literal instanceof Number) {
-      return ((Number) literal).doubleValue();
-    }
-    if (literal instanceof String) {
-      try {
-        return Double.valueOf((String) literal);
-      } catch (NumberFormatException e) {
-        // ignore
-      }
-    }
-    return null;
-  }
-
-  protected static List<Range> condenseRanges(List<Range> ranges) {
-    if (ranges.size() <= 1) {
-      return ranges;
-    }
-
-    Comparator<Range> startThenEnd = new Comparator<Range>() {
-      @Override
-      public int compare(Range lhs, Range rhs) {
-        int compare = 0;
-        if (lhs.hasLowerBound() && rhs.hasLowerBound()) {
-          compare = lhs.lowerEndpoint().compareTo(rhs.lowerEndpoint());
-        } else if (!lhs.hasLowerBound() && rhs.hasLowerBound()) {
-          compare = -1;
-        } else if (lhs.hasLowerBound() && !rhs.hasLowerBound()) {
-          compare = 1;
-        }
-        if (compare != 0) {
-          return compare;
-        }
-        if (lhs.hasUpperBound() && rhs.hasUpperBound()) {
-          compare = lhs.upperEndpoint().compareTo(rhs.upperEndpoint());
-        } else if (!lhs.hasUpperBound() && rhs.hasUpperBound()) {
-          compare = -1;
-        } else if (lhs.hasUpperBound() && !rhs.hasUpperBound()) {
-          compare = 1;
-        }
-        return compare;
-      }
-    };
-
-    TreeSet<Range> sortedIntervals = Sets.newTreeSet(startThenEnd);
-    sortedIntervals.addAll(ranges);
-
-    List<Range> retVal = Lists.newArrayList();
-
-    Iterator<Range> intervalsIter = sortedIntervals.iterator();
-    Range currInterval = intervalsIter.next();
-    while (intervalsIter.hasNext()) {
-      Range next = intervalsIter.next();
-      if (currInterval.encloses(next)) {
-        continue;
-      }
-      if (mergeable(currInterval, next)) {
-        currInterval = currInterval.span(next);
-      } else {
-        retVal.add(currInterval);
-        currInterval = next;
-      }
-    }
-    retVal.add(currInterval);
-
-    return retVal;
-  }
-
-  protected static boolean mergeable(Range range1, Range range2) {
-    Comparable x1 = range1.upperEndpoint();
-    Comparable x2 = range2.lowerEndpoint();
-    int compare = x1.compareTo(x2);
-    return compare > 0 || (compare == 0 && range1.upperBoundType() == BoundType.CLOSED
-            && range2.lowerBoundType() == BoundType.CLOSED);
-  }
-
-  public static long extractTotalTime(List<Interval> intervals) {
-    long totalTime = 0;
-    for (Interval interval : intervals) {
-      totalTime += (interval.getEndMillis() - interval.getStartMillis());
-    }
-    return totalTime;
-  }
-
-}

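The deleted DruidIntervalUtils reduced a conjunction of timestamp comparisons to Guava Range intersections before converting them to Druid's closed-open intervals (toInterval bumps an open lower bound, and a closed upper bound, by one millisecond to get [start, end)). A minimal sketch of the range step on plain longs, assuming only Guava on the classpath:

import com.google.common.collect.Range;

public final class RangeIntersectSketch {
  public static void main(String[] args) {
    // t > 10 AND t <= 20, as createInterval would see it
    Range<Long> greater = Range.greaterThan(10L);
    Range<Long> atMost = Range.atMost(20L);
    if (greater.isConnected(atMost)) {
      // Prints the half-open intersection, i.e. (10, 20]; an empty or
      // disconnected result would mean an unsatisfiable conjunction
      // (no rows, hence no Druid interval).
      System.out.println(greater.intersection(atMost));
    }
  }
}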

[34/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
index 739d0e1..6b59497 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
@@ -69,17 +69,13 @@ POSTHOOK: Lineage: tint.cint SIMPLE [(tint_txt)tint_txt.FieldSchema(name:cint, t
 POSTHOOK: Lineage: tint.rnum SIMPLE [(tint_txt)tint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tint_txt.rnum	tint_txt.cint
 Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Map 1' is a cross product
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint, (case when (tint.cint between tsint.csint and tsint.csint) then "Ok" else "NoOk" end) as between_col from tint , tsint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint, (case when (tint.cint between tsint.csint and tsint.csint) then "Ok" else "NoOk" end) as between_col from tint , tsint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -97,16 +93,9 @@ STAGE PLANS:
                 TableScan
                   alias: tint
                   Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: rnum (type: int), cint (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -114,11 +103,6 @@ STAGE PLANS:
                       keys:
                         0 
                         1 
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: Not empty key IS false
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         1 Map 2
@@ -126,17 +110,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col2 (type: int), _col1 (type: int), _col3 (type: smallint), CASE WHEN (_col1 BETWEEN _col3 AND _col3) THEN ('Ok') ELSE ('NoOk') END (type: string)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 2, 1, 3, 5]
-                            selectExpressions: VectorUDFAdaptor(CASE WHEN (_col1 BETWEEN _col3 AND _col3) THEN ('Ok') ELSE ('NoOk') END)(children: VectorUDFAdaptor(_col1 BETWEEN _col3 AND _col3) -> 4:Long) -> 5:String
                         Statistics: Num rows: 25 Data size: 385 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 25 Data size: 385 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -144,49 +120,21 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tsint
                   Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: rnum (type: int), csint (type: smallint)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: int), _col1 (type: smallint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -232,17 +180,13 @@ tint.rnum	tsint.rnum	tint.cint	tsint.csint	between_col
 4	3	10	1	NoOk
 4	4	10	10	Ok
 Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -260,16 +204,9 @@ STAGE PLANS:
                 TableScan
                   alias: tint
                   Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: rnum (type: int), cint (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -277,35 +214,19 @@ STAGE PLANS:
                       keys:
                         0 
                         1 
-                      Map Join Vectorization:
-                          className: VectorMapJoinOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: Not empty key IS false
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 25 Data size: 385 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        Filter Vectorization:
-                            className: VectorFilterOperator
-                            native: true
-                            predicateExpression: SelectColumnIsTrue(col 4)(children: VectorUDFAdaptor(_col1 BETWEEN _col3 AND _col3) -> 4:Long) -> boolean
                         predicate: _col1 BETWEEN _col3 AND _col3 (type: boolean)
                         Statistics: Num rows: 12 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: int), _col2 (type: int), _col1 (type: int), _col3 (type: smallint)
                           outputColumnNames: _col0, _col1, _col2, _col3
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0, 2, 1, 3]
                           Statistics: Num rows: 12 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 12 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -313,49 +234,21 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tsint
                   Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: rnum (type: int), csint (type: smallint)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: int), _col1 (type: smallint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
index 993a62f..0d4425b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
@@ -12,14 +12,10 @@ POSTHOOK: Lineage: decimal_date_test.cdate EXPRESSION [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: decimal_date_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_date_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_date_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -37,66 +33,28 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnInList(col 3, values [-67, -171]) -> boolean
                     predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -109,14 +67,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -134,78 +88,32 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsFalse(col 4)(children: LongColumnInList(col 3, values [-67, -171, 20]) -> 4:boolean) -> boolean
                     predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -218,14 +126,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -243,66 +147,28 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> boolean
                     predicate: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -315,14 +181,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -340,78 +202,32 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsFalse(col 4)(children: DecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> 4:boolean) -> boolean
                     predicate: (not (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568)) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -424,14 +240,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -449,66 +261,28 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnBetween(col 3, left -2, right 1) -> boolean
                     predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -521,14 +295,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -546,66 +316,28 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnNotBetween(col 3, left -610, right 608) -> boolean
                     predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -618,14 +350,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -643,66 +371,28 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnBetween(col 1, left -20, right 45.9918918919) -> boolean
                     predicate: cdecimal1 BETWEEN -20 AND 45.9918918919 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -715,14 +405,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -740,78 +426,32 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnNotBetween(col 1, left -2000, right 4390.1351351351) -> boolean
                     predicate: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1071,16 +711,12 @@ POSTHOOK: Input: default@decimal_date_test
 6172
 PREHOOK: query: -- projections
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
 POSTHOOK: query: -- projections
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1098,27 +734,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: LongColumnInList(col 3, values [-67, -171]) -> 4:boolean
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1127,50 +748,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1183,14 +775,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1208,27 +796,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: DecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> 4:boolean
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1237,50 +810,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1293,14 +837,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1318,27 +858,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: VectorUDFAdaptor(cdate BETWEEN 1969-12-30 AND 1970-01-02) -> 4:Long
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1347,50 +872,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1403,14 +899,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1428,27 +920,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: VectorUDFAdaptor(cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351) -> 4:Long
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1457,50 +934,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat


[52/62] hive git commit: HIVE-14991: JDBC result set iterator has useless DEBUG log (Prasanth Jayachandran reviewed by Vaibhav Gumashta)

Posted by we...@apache.org.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3bab49a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3bab49a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3bab49a3

Branch: refs/heads/hive-14535
Commit: 3bab49a3210c857d599cb5706706d2c2a30ee63e
Parents: ad6ce07
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Mon Oct 17 14:09:27 2016 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Mon Oct 17 14:09:27 2016 -0700

----------------------------------------------------------------------
 jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3bab49a3/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java
index 92fdbca..6a91381 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java
@@ -377,7 +377,6 @@ public class HiveQueryResultSet extends HiveBaseResultSet {
         fetchedRowsItr = fetchedRows.iterator();
       }
 
-      String rowStr = "";
       if (fetchedRowsItr.hasNext()) {
         row = fetchedRowsItr.next();
       } else {
@@ -385,10 +384,6 @@ public class HiveQueryResultSet extends HiveBaseResultSet {
       }
 
       rowsFetched++;
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Fetched row string: " + rowStr);
-      }
-
     } catch (SQLException eS) {
       throw eS;
     } catch (Exception ex) {

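The removed debug statement was dead code: rowStr was initialized to an empty string and never reassigned, so the guarded LOG.debug could only ever print "Fetched row string: " with no payload, once per fetched row. For contrast, a minimal sketch of the guarded-logging idiom this code was reaching for, assuming an slf4j-style logger (illustrative only, not Hive source):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RowFetchLogging {
  private static final Logger LOG = LoggerFactory.getLogger(RowFetchLogging.class);

  void onRowFetched(Object[] row) {
    // The isDebugEnabled() guard pays off only when building the message is
    // costly and the message carries real data; here Arrays.toString(row) is
    // evaluated only when DEBUG logging is actually on.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Fetched row: {}", java.util.Arrays.toString(row));
    }
  }
}

With slf4j's parameterized {} form the string concatenation is already deferred, so an explicit guard is only worth keeping when computing the arguments themselves is expensive.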

[60/62] hive git commit: HIVE-14927: Remove code duplication from tests in TestLdapAtnProviderWithMiniDS (Illya Yalovyy via Chaoyu Tang)

Posted by we...@apache.org.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8888fe4d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8888fe4d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8888fe4d

Branch: refs/heads/hive-14535
Commit: 8888fe4d88d6459f7b3672d3cc32e87b3c742214
Parents: b597ab2
Author: ctang <ct...@cloudera.com>
Authored: Tue Oct 18 17:04:50 2016 -0400
Committer: ctang <ct...@cloudera.com>
Committed: Tue Oct 18 17:04:50 2016 -0400

----------------------------------------------------------------------
 .../auth/TestLdapAtnProviderWithMiniDS.java     | 1093 ++++++------------
 .../auth/ldap/LdapAuthenticationTestCase.java   |  142 +++
 .../org/apache/hive/service/auth/ldap/User.java |   99 ++
 3 files changed, 605 insertions(+), 729 deletions(-)
----------------------------------------------------------------------

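The substance of this refactor is a fluent test fixture: the per-test HashMap/initLdapAtn/try-catch boilerplate is replaced by builders and one-line assertions. The committed User and LdapAuthenticationTestCase classes appear below only through their call sites, so the following is a hypothetical reconstruction of the User helper inferred from that usage; the method names come from the diff, but the field layout and the {bindName, password} credentials shape are assumptions, not the committed code:

// Hypothetical reconstruction of the User helper from HIVE-14927, inferred
// from call sites such as User.builder().id("user1").useIdForPassword()
//     .dn("uid=user1,ou=People,dc=example,dc=com").build().
public final class User {
  private final String id;
  private final String dn;
  private final String password;

  private User(String id, String dn, String password) {
    this.id = id;
    this.dn = dn;
    this.password = password;
  }

  public static Builder builder() {
    return new Builder();
  }

  public String getId() { return id; }
  public String getDn() { return dn; }
  public String getPassword() { return password; }

  // The concrete credentials type is not visible in this part of the diff;
  // a {bindName, password} pair is assumed here purely for illustration.
  public String[] credentialsWithId() { return new String[] {id, password}; }
  public String[] credentialsWithDn() { return new String[] {dn, password}; }

  public static final class Builder {
    private String id;
    private String dn;
    private String password;
    private boolean useIdForPassword;

    public Builder id(String id) { this.id = id; return this; }
    public Builder dn(String dn) { this.dn = dn; return this; }
    public Builder password(String password) { this.password = password; return this; }

    // Mirrors useIdForPassword() in the tests: the password defaults to the
    // user id when build() runs.
    public Builder useIdForPassword() { this.useIdForPassword = true; return this; }

    public User build() {
      return new User(id, dn, useIdForPassword ? id : password);
    }
  }
}

Each test then builds an LdapAuthenticationTestCase via defaultBuilder() and asserts with assertAuthenticatePasses/assertAuthenticateFails, collapsing what used to be a try/catch per credential into a single line.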

http://git-wip-us.apache.org/repos/asf/hive/blob/8888fe4d/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithMiniDS.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithMiniDS.java b/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithMiniDS.java
index 8c723cf..cd62935 100644
--- a/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithMiniDS.java
+++ b/service/src/test/org/apache/hive/service/auth/TestLdapAtnProviderWithMiniDS.java
@@ -20,12 +20,6 @@
 
 package org.apache.hive.service.auth;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Iterator;
-
-import javax.security.sasl.AuthenticationException;
-
 import org.apache.directory.server.annotations.CreateLdapServer;
 import org.apache.directory.server.annotations.CreateTransport;
 import org.apache.directory.server.core.annotations.ApplyLdifFiles;
@@ -36,14 +30,12 @@ import org.apache.directory.server.core.annotations.CreatePartition;
 import org.apache.directory.server.core.integ.AbstractLdapTestUnit;
 import org.apache.directory.server.core.integ.FrameworkRunner;
 
-import org.apache.hadoop.hive.conf.HiveConf;
-import static org.junit.Assert.assertTrue;
+import org.apache.hive.service.auth.ldap.LdapAuthenticationTestCase;
+import org.apache.hive.service.auth.ldap.User;
 import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import static org.junit.Assert.assertTrue;
 
 
 /**
@@ -78,22 +70,39 @@ import org.junit.runner.RunWith;
 @ApplyLdifFiles("ldap/example.com.ldif")
 public class TestLdapAtnProviderWithMiniDS extends AbstractLdapTestUnit {
 
-  private static LdapAuthenticationProviderImpl ldapProvider;
-  Map<String, String> ldapProperties;
-
-  static final User USER1 = new User("user1", "user1", "uid=user1,ou=People,dc=example,dc=com");
-  static final User USER2 = new User("user2", "user2", "uid=user2,ou=People,dc=example,dc=com");
-  static final User USER3 = new User("user3", "user3", "cn=user3,ou=People,dc=example,dc=com");
-  static final User USER4 = new User("user4", "user4", "cn=user4,ou=People,dc=example,dc=com");
-
-  @Before
-  public void setup() throws Exception {
-    ldapProperties = new HashMap<>();
-  }
-
-  @BeforeClass
-  public static void init() throws Exception {
-    ldapProvider = new LdapAuthenticationProviderImpl(new HiveConf());
+  private static final String GROUP1_NAME = "group1";
+  private static final String GROUP2_NAME = "group2";
+  private static final String GROUP3_NAME = "group3";
+  private static final String GROUP4_NAME = "group4";
+
+  private static final User USER1 = User.builder()
+      .id("user1")
+      .useIdForPassword()
+      .dn("uid=user1,ou=People,dc=example,dc=com")
+      .build();
+
+  private static final User USER2 = User.builder()
+      .id("user2")
+      .useIdForPassword()
+      .dn("uid=user2,ou=People,dc=example,dc=com")
+      .build();
+
+  private static final User USER3 = User.builder()
+      .id("user3")
+      .useIdForPassword()
+      .dn("cn=user3,ou=People,dc=example,dc=com")
+      .build();
+
+  private static final User USER4 = User.builder()
+      .id("user4")
+      .useIdForPassword()
+      .dn("cn=user4,ou=People,dc=example,dc=com")
+      .build();
+
+  private LdapAuthenticationTestCase testCase;
+
+  private LdapAuthenticationTestCase.Builder defaultBuilder() {
+    return LdapAuthenticationTestCase.builder().ldapServer(ldapServer);
   }
 
   @AfterClass
@@ -103,633 +112,322 @@ public class TestLdapAtnProviderWithMiniDS extends AbstractLdapTestUnit {
     }
   }
 
-  private void initLdapAtn() throws Exception {
-    String ldapUrl = "ldap://localhost:" + ldapServer.getPort();
-
-    HiveConf hiveConf = new HiveConf();
-    hiveConf.set("hive.root.logger", "DEBUG,console");
-    hiveConf.set("hive.server2.authentication.ldap.url", ldapUrl);
-
-    for (Map.Entry<String, String> entry : ldapProperties.entrySet()) {
-      hiveConf.set(entry.getKey(), entry.getValue());
-    }
-
-    ldapProvider = new LdapAuthenticationProviderImpl(hiveConf);
-  }
-
   @Test
   public void testLDAPServer() throws Exception {
-    initLdapAtn();
     assertTrue(ldapServer.isStarted());
     assertTrue(ldapServer.getPort() > 0);
   }
 
   @Test
-  public void testUserBindPositiveWithShortname() throws Exception {
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    initLdapAtn();
-    String user;
-
-    user = USER1.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user + " with password "
-                  + USER1.getPassword() + ", expected to succeed");
-    }
-
-    user = USER2.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + USER2.getUID() + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user + " with password "
-                  + USER2.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithShortname() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
   }
 
   @Test
-  public void testUserBindPositiveWithShortnameOldConfig() throws Exception {
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    initLdapAtn();
-    String user;
-
-    user = USER1.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user + " with password "
-                  + USER1.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithShortnameOldConfig() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .build();
 
-    user = USER2.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + USER2.getUID() + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user + " with password "
-                  + USER2.getPassword() + ", expected to succeed");
-    }
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
   }
 
   @Test
-  public void testUserBindNegativeWithShortname() throws Exception {
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    initLdapAtn();
-
-    try {
-      ldapProvider.Authenticate(USER1.getUID(), USER2.getPassword());
-      Assert.fail("testUserBindNegative: Authentication succeeded for " + USER1.getUID() + " with password "
-                  + USER2.getPassword() + ", expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + USER1.getUID() + " as expected", true);
-    }
-
-    try {
-      ldapProvider.Authenticate(USER2.getUID(), "user");
-      Assert.fail("testUserBindNegative: Authentication failed for " + USER2.getUID() + " with password user, expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + USER2.getUID() + " as expected", true);
-    }
+  public void testUserBindNegativeWithShortname() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER1.credentialsWithId());
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER2.credentialsWithId());
   }
 
   @Test
-  public void testUserBindNegativeWithShortnameOldConfig() throws Exception {
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    initLdapAtn();
-
-    try {
-      ldapProvider.Authenticate(USER1.getUID(), USER2.getPassword());
-      Assert.fail("testUserBindNegative: Authentication succeeded for " + USER1.getUID() + " with password "
-                  + USER2.getPassword() + ", expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + USER1.getUID() + " as expected", true);
-    }
-
-    try {
-      ldapProvider.Authenticate(USER2.getUID(), "user");
-      Assert.fail("testUserBindNegative: Authentication failed for " + USER2.getUID() + " with password user, expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + USER2.getUID() + " as expected", true);
-    }
+  public void testUserBindNegativeWithShortnameOldConfig() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER1.credentialsWithId());
+    testCase.assertAuthenticateFails(
+        USER1.getDn(),
+        USER2.getPassword());
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER2.credentialsWithId());
   }
 
   @Test
-  public void testUserBindPositiveWithDN() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed:" + e.getMessage());
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " user as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER2.getPassword() + ", expected to succeed:" + e.getMessage());
-    }
+  public void testUserBindPositiveWithDN() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindPositiveWithDNOldConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithDNOldConfig() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .build();
 
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER2.getPassword() + ", expected to succeed");
-    }
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindPositiveWithDNWrongOldConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=DummyPeople,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithDNWrongOldConfig() {
+    testCase = defaultBuilder()
+        .baseDN("ou=DummyPeople,dc=example,dc=com")
+        .build();
 
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password "
-                  + USER2.getPassword() + ", expected to succeed");
-    }
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindPositiveWithDNWrongConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=DummyPeople,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=DummyGroups,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed");
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER2.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithDNWrongConfig() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=DummyPeople,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=DummyGroups,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindPositiveWithDNBlankConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", " ");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", " ");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed");
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER2.getPassword() + ", expected to succeed");
-    }
+  public void testUserBindPositiveWithDNBlankConfig() {
+    testCase = defaultBuilder()
+        .userDNPatterns(" ")
+        .groupDNPatterns(" ")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
   public void testUserBindPositiveWithDNBlankOldConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
+    testCase = defaultBuilder()
+        .baseDN("")
+        .build();
 
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER1.getPassword() + ", expected to succeed");
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserBindPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserBindPositive: Authentication failed for user:" + user +
-                    " with password " + USER2.getPassword() + ", expected to succeed");
-    }
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindNegativeWithDN() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserBindNegative: Authentication succeeded for " + user + " with password " +
-                   USER2.getPassword() + ", expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, "user");
-      Assert.fail("testUserBindNegative: Authentication failed for " + user + " with password user, " +
-                    "expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testUserBindNegativeWithDN() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER1.credentialsWithDn());
+    testCase.assertAuthenticateFails(
+        USER1.getDn(),
+        USER2.getPassword());
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserBindNegativeWithDNOldConfig() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    initLdapAtn();
-    assertTrue(ldapServer.getPort() > 0);
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserBindNegative: Authentication succeeded for " + user + " with password " +
-                   USER2.getPassword() + ", expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, "user");
-      Assert.fail("testUserBindNegative: Authentication failed for " + user + " with password user, " +
-                    "expected to fail");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserBindNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testUserBindNegativeWithDNOldConfig() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .build();
+
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER1.credentialsWithDn());
+    testCase.assertAuthenticateFails(
+        USER1.getDn(),
+        USER2.getPassword());
+    testCase.assertAuthenticateFailsUsingWrongPassword(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserFilterPositive() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER2.getUID());
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER2.getUID();
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserFilterPositive: Authentication failed for " + user + ",user expected to pass userfilter");
-    }
-
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER1.getUID());
-    initLdapAtn();
-
-    try {
-      user = USER1.getDN();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER1.getUID();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserFilterPositive: Authentication failed for " + user + ",user expected to pass userfilter");
-    }
-
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER2.getUID() + "," + USER1.getUID());
-    initLdapAtn();
-
-    try {
-      user = USER1.getDN();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER2.getUID();
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserFilterPositive: Authentication failed for user, user is expected to pass userfilter");
-    }
+  public void testUserFilterPositive() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(USER1.getId())
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(USER2.getId())
+        .build();
+
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(
+            USER1.getId(),
+            USER2.getId())
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserFilterNegative() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER2.getUID());
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user is expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    user = USER1.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user is expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER1.getUID());
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user is expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    user = USER2.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user is expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER3.getUID());
-    initLdapAtn();
-
-    user = USER1.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserFilterNegative: Authentication succeeded for " + user + ",user expected to fail userfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testUserFilterNegative() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(USER2.getId())
+        .build();
+
+    testCase.assertAuthenticateFails(USER1.credentialsWithId());
+    testCase.assertAuthenticateFails(USER1.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(USER1.getId())
+        .build();
+
+    testCase.assertAuthenticateFails(USER2.credentialsWithId());
+    testCase.assertAuthenticateFails(USER2.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .userFilters(USER3.getId())
+        .build();
+
+    testCase.assertAuthenticateFails(USER1.credentialsWithId());
+    testCase.assertAuthenticateFails(USER2.credentialsWithId());
   }
 
   @Test
-  public void testGroupFilterPositive() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group1,group2");
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER1.getUID();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER2.getDN();
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
-
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group2");
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
+  public void testGroupFilterPositive() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(
+            GROUP1_NAME,
+            GROUP2_NAME)
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(GROUP2_NAME)
+        .build();
+
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testGroupFilterNegative() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group1");
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testGroupFilterNegative: Authentication succeeded for " + user + ",user expected to fail groupfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testGroupFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testGroupFilterNegative() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(GROUP2_NAME)
+        .build();
 
-    ldapProperties = new HashMap<>();
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group2");
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      Assert.fail("testGroupFilterNegative: Authentication succeeded for " + user + ",user expected to fail groupfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testGroupFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
-  }
 
-  @Test
-  public void testUserAndGroupFilterPositive() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER1.getUID() + "," + USER2.getUID());
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group1,group2");
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserAndGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER1.getUID();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testUserAndGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserAndGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
+    testCase.assertAuthenticateFails(USER1.credentialsWithId());
+    testCase.assertAuthenticateFails(USER1.credentialsWithDn());
 
-    user = USER2.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      assertTrue("testUserAndGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testUserAndGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
+
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(GROUP1_NAME)
+        .build();
+
+    testCase.assertAuthenticateFails(USER2.credentialsWithId());
+    testCase.assertAuthenticateFails(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testUserAndGroupFilterNegative() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "uid=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userFilter", USER1.getUID() + "," + USER2.getUID());
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group1");
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserAndGroupFilterNegative: Authentication succeeded for " + user + ",user expected to fail groupfilter");
-
-      user = USER2.getUID();
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testUserAndGroupFilterNegative: Authentication succeeded for " + user + ",user expected to fail groupfilter");
-
-      user = USER3.getUID();
-      ldapProvider.Authenticate(user, USER3.getPassword());
-      Assert.fail("testUserAndGroupFilterNegative: Authentication succeeded for " + user + ",user expected to fail groupfilter");
-    } catch (AuthenticationException e) {
-      assertTrue("testUserAndGroupFilterNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testUserAndGroupFilterPositive() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .userFilters(
+            USER1.getId(),
+            USER2.getId())
+        .groupFilters(
+            GROUP1_NAME,
+            GROUP2_NAME)
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
   }
 
   @Test
-  public void testCustomQueryPositive() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com:uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "cn=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery", "(&(objectClass=person)(|(uid="
-                       + USER1.getUID() + ")(uid=" + USER4.getUID() + ")))");
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testCustomQueryPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER1.getUID();
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testCustomQueryPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER4.getDN();
-      ldapProvider.Authenticate(user, USER4.getPassword());
-      assertTrue("testCustomQueryPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testCustomQueryPositive: Authentication failed for " + user + ",user expected to pass custom LDAP Query");
-    }
+  public void testUserAndGroupFilterNegative() {
+    testCase = defaultBuilder()
+        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("uid=%s,ou=Groups,dc=example,dc=com")
+        .userFilters(
+            USER1.getId(),
+            USER2.getId())
+        .groupFilters(
+            GROUP3_NAME,
+            GROUP3_NAME)
+        .build();
+
+    testCase.assertAuthenticateFails(USER2.credentialsWithDn());
+    testCase.assertAuthenticateFails(USER2.credentialsWithId());
+    testCase.assertAuthenticateFails(USER3.credentialsWithDn());
+    testCase.assertAuthenticateFails(USER3.credentialsWithId());
   }
 
   @Test
-  public void testCustomQueryNegative() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "ou=People,dc=example,dc=com");
-    // ldap query will only return user1
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery", "(&(objectClass=person)(uid="
-                       + USER1.getUID() + "))");
-    initLdapAtn();
-
-    user = USER2.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testCustomQueryNegative: Authentication succeeded for " + user + ",user expected to fail custom LDAP Query");
-    } catch (AuthenticationException e) {
-      assertTrue("testCustomQueryNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testCustomQueryPositive() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .userDNPatterns(
+            "cn=%s,ou=People,dc=example,dc=com",
+            "uid=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("cn=%s,ou=People,dc=example,dc=com")
+        .customQuery(
+            String.format("(&(objectClass=person)(|(uid=%s)(uid=%s)))",
+                USER1.getId(),
+                USER4.getId()))
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithDn());
+  }
 
-    try {
-      user = USER2.getUID();
-      ldapProvider.Authenticate(user, USER2.getPassword());
-      Assert.fail("testCustomQueryNegative: Authentication succeeded for " + user + ",user expected to fail custom LDAP Query");
-    } catch (AuthenticationException e) {
-      assertTrue("testCustomQueryNegative: Authentication failed for " + user + " as expected", true);
-    }
+  @Test
+  public void testCustomQueryNegative() {
+    testCase = defaultBuilder()
+        .baseDN("ou=People,dc=example,dc=com")
+        .customQuery(
+            String.format("(&(objectClass=person)(uid=%s))",
+                USER1.getId()))
+        .build();
+
+    testCase.assertAuthenticateFails(USER2.credentialsWithDn());
+    testCase.assertAuthenticateFails(USER2.credentialsWithId());
   }
 
   /**
@@ -740,164 +438,101 @@ public class TestLdapAtnProviderWithMiniDS extends AbstractLdapTestUnit {
    "hive.server2.authentication.ldap.groupMembershipKey"
    */
   @Test
-  public void testCustomQueryWithGroupsPositive() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com:uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery",
-                         "(&(objectClass=groupOfNames)(|(cn=group1)(cn=group2)))");
-    initLdapAtn();
-
-    user = USER1.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-
-       user = USER2.getUID();
-       ldapProvider.Authenticate(user, USER2.getPassword());
-       assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testCustomQueryWithGroupsPositive: Authentication failed for " + user + ",user expected to pass custom LDAP Query");
-    }
+  public void testCustomQueryWithGroupsPositive() {
+    testCase = defaultBuilder()
+        .baseDN("dc=example,dc=com")
+        .userDNPatterns(
+            "cn=%s,ou=People,dc=example,dc=com",
+            "uid=%s,ou=People,dc=example,dc=com")
+        .customQuery(
+            String.format("(&(objectClass=groupOfNames)(|(cn=%s)(cn=%s)))",
+                GROUP1_NAME,
+                GROUP2_NAME))
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER2.credentialsWithDn());
 
    /* The following test uses a query that returns both a group entry and a user entry.
       The LDAP authenticator should use the groupMembershipKey to identify the users of the
       returned group, and authentication should succeed for those users as well as for the
       lone user4 in this case.
    */
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com:uid=%s,ou=People,dc=example,dc=com");
-    // following query should return group1 and user2
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery",
-                         "(|(&(objectClass=groupOfNames)(cn=group1))(&(objectClass=person)(sn=user4)))");
-    initLdapAtn();
-
-    user = USER1.getUID();
-    try {
-      ldapProvider.Authenticate(user, USER1.getPassword());
-      assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-
-       user = USER4.getUID();
-       ldapProvider.Authenticate(user, USER4.getPassword());
-       assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testCustomQueryWithGroupsPositive: Authentication failed for " + user + ",user expected to pass custom LDAP Query");
-    }
-
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com:uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupMembershipKey", "uniqueMember");
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery",
-                         "(&(objectClass=groupOfUniqueNames)(cn=group4))");
-    initLdapAtn();
-
-    user = USER4.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER4.getPassword());
-      assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER4.getUID();
-      ldapProvider.Authenticate(user, USER4.getPassword());
-      assertTrue("testCustomQueryWithGroupsPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testCustomQueryWithGroupsPositive: Authentication failed for " + user + ",user expected to pass custom LDAP Query");
-    }
+    testCase = defaultBuilder()
+        .baseDN("dc=example,dc=com")
+        .userDNPatterns(
+            "cn=%s,ou=People,dc=example,dc=com",
+            "uid=%s,ou=People,dc=example,dc=com")
+        .customQuery(
+            String.format("(|(&(objectClass=groupOfNames)(cn=%s))(&(objectClass=person)(sn=%s)))",
+                GROUP1_NAME,
+                USER4.getId()))
+        .build();
+
+    testCase.assertAuthenticatePasses(USER1.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER1.credentialsWithDn());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithDn());
+
+    testCase = defaultBuilder()
+        .baseDN("dc=example,dc=com")
+        .userDNPatterns(
+            "cn=%s,ou=People,dc=example,dc=com",
+            "uid=%s,ou=People,dc=example,dc=com")
+        .groupMembership("uniqueMember")
+        .customQuery(
+            String.format("(&(objectClass=groupOfUniqueNames)(cn=%s))",
+                GROUP4_NAME))
+        .build();
+
+    testCase.assertAuthenticatePasses(USER4.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithDn());
   }
 
   @Test
-  public void testCustomQueryWithGroupsNegative() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.baseDN", "dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com:uid=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.customLDAPQuery",
-                         "(&(objectClass=groupOfNames)(|(cn=group1)(cn=group2)))");
-    initLdapAtn();
-
-    user = USER3.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER3.getPassword());
-      Assert.fail("testCustomQueryNegative: Authentication succeeded for " + user + ",user expected to fail custom LDAP Query");
-    } catch (AuthenticationException e) {
-      assertTrue("testCustomQueryNegative: Authentication failed for " + user + " as expected", true);
-    }
-
-    try {
-      user = USER3.getUID();
-      ldapProvider.Authenticate(user, USER3.getPassword());
-      Assert.fail("testCustomQueryNegative: Authentication succeeded for " + user + ",user expected to fail custom LDAP Query");
-    } catch (AuthenticationException e) {
-      assertTrue("testCustomQueryNegative: Authentication failed for " + user + " as expected", true);
-    }
+  public void testCustomQueryWithGroupsNegative() {
+    testCase = defaultBuilder()
+        .baseDN("dc=example,dc=com")
+        .userDNPatterns(
+            "cn=%s,ou=People,dc=example,dc=com",
+            "uid=%s,ou=People,dc=example,dc=com")
+        .customQuery(
+            String.format("(&(objectClass=groupOfNames)(|(cn=%s)(cn=%s)))",
+                GROUP1_NAME,
+                GROUP2_NAME))
+        .build();
+
+    testCase.assertAuthenticateFails(USER3.credentialsWithDn());
+    testCase.assertAuthenticateFails(USER3.credentialsWithId());
   }
 
   @Test
-  public void testGroupFilterPositiveWithCustomGUID() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "cn=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.guidKey", "cn");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group3");
-    initLdapAtn();
-
-    user = USER3.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER3.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER3.getUID();
-      ldapProvider.Authenticate(user, USER3.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
+  public void testGroupFilterPositiveWithCustomGUID() {
+    testCase = defaultBuilder()
+        .userDNPatterns("cn=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("cn=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(GROUP3_NAME)
+        .guidKey("cn")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER3.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER3.credentialsWithDn());
   }
 
   @Test
-  public void testGroupFilterPositiveWithCustomAttributes() throws Exception {
-    String user;
-    ldapProperties.put("hive.server2.authentication.ldap.userDNPattern", "cn=%s,ou=People,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupDNPattern", "cn=%s,ou=Groups,dc=example,dc=com");
-    ldapProperties.put("hive.server2.authentication.ldap.groupFilter", "group4");
-    ldapProperties.put("hive.server2.authentication.ldap.guidKey", "cn");
-    ldapProperties.put("hive.server2.authentication.ldap.groupMembershipKey", "uniqueMember");
-    ldapProperties.put("hive.server2.authentication.ldap.groupClassKey", "groupOfUniqueNames");
-    initLdapAtn();
-
-    user = USER4.getDN();
-    try {
-      ldapProvider.Authenticate(user, USER4.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-
-      user = USER4.getUID();
-      ldapProvider.Authenticate(user, USER4.getPassword());
-      assertTrue("testGroupFilterPositive: Authentication succeeded for " + user + " as expected", true);
-    } catch (AuthenticationException e) {
-      Assert.fail("testGroupFilterPositive: Authentication failed for " + user + ",user expected to pass groupfilter");
-    }
-
-  }
-
-  private static class User {
-    String uid;
-    String pwd;
-    String ldapDN;
-
-    User(String uid, String password, String ldapDN) {
-      this.uid    = uid;
-      this.pwd    = password;
-      this.ldapDN = ldapDN;
-    }
-
-    public String getUID() {
-      return uid;
-    }
-
-    public String getPassword() {
-      return pwd;
-    }
-
-    public String getDN() {
-      return ldapDN;
-    }
+  public void testGroupFilterPositiveWithCustomAttributes() {
+    testCase = defaultBuilder()
+        .userDNPatterns("cn=%s,ou=People,dc=example,dc=com")
+        .groupDNPatterns("cn=%s,ou=Groups,dc=example,dc=com")
+        .groupFilters(GROUP4_NAME)
+        .guidKey("cn")
+        .groupMembership("uniqueMember")
+        .groupClassKey("groupOfUniqueNames")
+        .build();
+
+    testCase.assertAuthenticatePasses(USER4.credentialsWithId());
+    testCase.assertAuthenticatePasses(USER4.credentialsWithDn());
   }
 }
-
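
For context: the refactored tests above all start from defaultBuilder(), which
this hunk does not show. Presumably it points the new test-case builder at the
embedded ApacheDS instance that AbstractLdapTestUnit starts for the class -- a
minimal sketch, assuming that setup:

    // Hypothetical helper; not part of this hunk. It wires the embedded
    // LDAP server into the builder so each test only adds its own overrides.
    private static LdapAuthenticationTestCase.Builder defaultBuilder() {
      return LdapAuthenticationTestCase.builder().ldapServer(ldapServer);
    }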

http://git-wip-us.apache.org/repos/asf/hive/blob/8888fe4d/service/src/test/org/apache/hive/service/auth/ldap/LdapAuthenticationTestCase.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/auth/ldap/LdapAuthenticationTestCase.java b/service/src/test/org/apache/hive/service/auth/ldap/LdapAuthenticationTestCase.java
new file mode 100644
index 0000000..acde8c1
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/auth/ldap/LdapAuthenticationTestCase.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.auth.ldap;
+
+import javax.security.sasl.AuthenticationException;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
+import java.util.Map;
+import org.apache.directory.server.ldap.LdapServer;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.service.auth.LdapAuthenticationProviderImpl;
+import org.junit.Assert;
+
+public final class LdapAuthenticationTestCase {
+
+  private final LdapAuthenticationProviderImpl ldapProvider;
+
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  private LdapAuthenticationTestCase(Builder builder) {
+    this.ldapProvider = new LdapAuthenticationProviderImpl(builder.conf);
+  }
+
+  public void assertAuthenticatePasses(Credentials credentials) {
+    try {
+      ldapProvider.Authenticate(credentials.getUser(), credentials.getPassword());
+    } catch (AuthenticationException e) {
+      String message = String.format("Authentication failed for user '%s' with password '%s'",
+          credentials.getUser(), credentials.getPassword());
+      throw new AssertionError(message, e);
+    }
+  }
+
+  public void assertAuthenticateFails(Credentials credentials) {
+    assertAuthenticateFails(credentials.getUser(), credentials.getPassword());
+  }
+
+  public void assertAuthenticateFailsUsingWrongPassword(Credentials credentials) {
+    assertAuthenticateFails(credentials.getUser(), "not" + credentials.getPassword());
+  }
+
+  public void assertAuthenticateFails(String user, String password) {
+    try {
+      ldapProvider.Authenticate(user, password);
+      Assert.fail(String.format("Expected authentication to fail for %s", user));
+    } catch (AuthenticationException expected) {
+      Assert.assertNotNull("Expected authentication exception", expected);
+    }
+  }
+
+  public static final class Builder {
+
+    private final Map<HiveConf.ConfVars, String> overrides = new EnumMap<>(HiveConf.ConfVars.class);
+    private HiveConf conf;
+
+    public Builder baseDN(String baseDN) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN, baseDN);
+    }
+
+    public Builder guidKey(String guidKey) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY, guidKey);
+    }
+
+    public Builder userDNPatterns(String... userDNPatterns) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN,
+          Joiner.on(':').join(userDNPatterns));
+    }
+
+    public Builder userFilters(String... userFilters) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER,
+          Joiner.on(',').join(userFilters));
+    }
+
+    public Builder groupDNPatterns(String... groupDNPatterns) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN,
+          Joiner.on(':').join(groupDNPatterns));
+    }
+
+    public Builder groupFilters(String... groupFilters) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER,
+          Joiner.on(',').join(groupFilters));
+    }
+
+    public Builder groupClassKey(String groupClassKey) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY, groupClassKey);
+    }
+
+    public Builder ldapServer(LdapServer ldapServer) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_URL,
+          "ldap://localhost:" + ldapServer.getPort());
+    }
+
+    public Builder customQuery(String customQuery) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY, customQuery);
+    }
+
+    public Builder groupMembership(String groupMembership) {
+      return setVarOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY, groupMembership);
+    }
+
+    private Builder setVarOnce(HiveConf.ConfVars confVar, String value) {
+      Preconditions.checkState(!overrides.containsKey(confVar),
+          "Property %s has been set already", confVar);
+      overrides.put(confVar, value);
+      return this;
+    }
+
+    private void overrideHiveConf() {
+      conf.set("hive.root.logger", "DEBUG,console");
+      for (Map.Entry<HiveConf.ConfVars, String> entry : overrides.entrySet()) {
+        conf.setVar(entry.getKey(), entry.getValue());
+      }
+    }
+
+    public LdapAuthenticationTestCase build() {
+      Preconditions.checkState(conf == null,
+          "Test Case Builder should not be reused. Please create a new instance.");
+      conf = new HiveConf();
+      overrideHiveConf();
+      return new LdapAuthenticationTestCase(this);
+    }
+  }
+}
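
A quick usage sketch of this helper (the DN pattern, filter, and credential
values here are illustrative, not taken from the test fixture):

    // Minimal sketch, assuming an embedded LdapServer named ldapServer.
    LdapAuthenticationTestCase testCase = LdapAuthenticationTestCase.builder()
        .ldapServer(ldapServer)
        .userDNPatterns("uid=%s,ou=People,dc=example,dc=com")
        .groupFilters("group1")
        .build();

    testCase.assertAuthenticatePasses(Credentials.of("user1", "user1Pass"));
    testCase.assertAuthenticateFailsUsingWrongPassword(Credentials.of("user1", "user1Pass"));

Note that build() creates a fresh HiveConf and refuses to be called twice, so
configuration overrides cannot leak from one test case into the next.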

http://git-wip-us.apache.org/repos/asf/hive/blob/8888fe4d/service/src/test/org/apache/hive/service/auth/ldap/User.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/auth/ldap/User.java b/service/src/test/org/apache/hive/service/auth/ldap/User.java
new file mode 100644
index 0000000..a7aa1aa
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/auth/ldap/User.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.auth.ldap;
+
+import com.google.common.base.Preconditions;
+
+public final class User {
+
+  private final String dn;
+  private final String id;
+  private final String password;
+
+  private User(Builder builder) {
+    dn = builder.dn;
+    id = builder.id;
+    password = builder.password;
+  }
+
+  public String getDn() {
+    return dn;
+  }
+
+  public String getId() {
+    return id;
+  }
+
+  public String getPassword() {
+    return password;
+  }
+
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  public Credentials credentialsWithDn() {
+    return Credentials.of(dn, password);
+  }
+
+  public Credentials credentialsWithId() {
+    return Credentials.of(id, password);
+  }
+
+  public static final class Builder {
+    private String dn;
+    private String id;
+    private String password;
+
+    private Builder() {
+    }
+
+    public Builder dn(String dn) {
+      Preconditions.checkNotNull(dn, "DN should not be NULL");
+      Preconditions.checkState(this.dn == null, "DN has been set already");
+      this.dn = dn;
+      return this;
+    }
+
+    public Builder id(String id) {
+      Preconditions.checkNotNull(id, "ID should not be NULL");
+      Preconditions.checkState(this.id == null, "ID has been set already");
+      this.id = id;
+      return this;
+    }
+
+    public Builder password(String password) {
+      Preconditions.checkNotNull(password, "Password should not be NULL");
+      Preconditions.checkState(this.password == null, "Password has been set already");
+      this.password = password;
+      return this;
+    }
+
+    public Builder useIdForPassword() {
+      Preconditions.checkState(this.id != null, "User ID has not been set");
+      return password(id);
+    }
+
+    public User build() {
+      Preconditions.checkNotNull(this.dn, "DN is required");
+      Preconditions.checkNotNull(this.id, "ID is required");
+      Preconditions.checkNotNull(this.password, "Password is required");
+      return new User(this);
+    }
+  }
+}
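
A sketch of how a fixture might declare a test user with this builder (the id
and DN values are illustrative only):

    // Illustrative values; useIdForPassword() reuses the id as the password.
    static final User USER1 = User.builder()
        .id("user1")
        .dn("uid=user1,ou=People,dc=example,dc=com")
        .useIdForPassword()
        .build();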


[32/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
index 2789664..c7897f7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
+EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 ORDER BY cdouble, cstring1, cint, cfloat, csmallint, c
@@ -8,16 +8,12 @@ LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
+EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 ORDER BY cdouble, cstring1, cint, cfloat, csmallint, c
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -25,62 +21,53 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNull(col 5) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [6, 2, 4, 1, 16]
-                          selectExpressions: VectorCoalesce(columns [12, 6, 13, 14, 15])(children: ConstantVectorExpression(val null) -> 12:string, col 6, CastLongToString(col 2) -> 13:String, VectorUDFAdaptor(null(cfloat)) -> 14:String, CastLongToString(col 1) -> 15:String) -> 16:string
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 1045942 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: cdouble is null (type: boolean)
+                    Statistics: Num rows: 3114 Data size: 265164 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: cstring1 (type: string), cint (type: int), cfloat (type: float), csmallint (type: smallint), COALESCE(null,cstring1,cint,cfloat,csmallint) (type: string)
+                      outputColumnNames: _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 3114 Data size: 819540 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string), _col2 (type: int), _col3 (type: float), _col4 (type: smallint), _col5 (type: string)
+                        sort order: +++++
+                        Statistics: Num rows: 3114 Data size: 819540 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [5, 0, 1, 2, 3, 4]
-                    selectExpressions: ConstantVectorExpression(val null) -> 5:double
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
+              Select Operator
+                expressions: null (type: double), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: float), KEY.reducesinkkey3 (type: smallint), KEY.reducesinkkey4 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 3114 Data size: 246572 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c 
 FROM alltypesorc
@@ -108,22 +95,18 @@ NULL	NULL	-738306196	-51.0	NULL	-738306196
 NULL	NULL	-819152895	8.0	NULL	-819152895
 NULL	NULL	-827212561	8.0	NULL	-827212561
 NULL	NULL	-949587513	11.0	NULL	-949587513
-PREHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
+PREHOOK: query: EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
 FROM alltypesorc
 WHERE (ctinyint IS NULL)
 ORDER BY ctinyint, cdouble, cint, c
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
+POSTHOOK: query: EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
 FROM alltypesorc
 WHERE (ctinyint IS NULL)
 ORDER BY ctinyint, cdouble, cint, c
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -131,62 +114,53 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNull(col 0) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [5, 2, 15]
-                          selectExpressions: VectorCoalesce(columns [12, 14, 13])(children: ConstantVectorExpression(val null) -> 12:double, DoubleColAddDoubleColumn(col 5, col 13)(children: FuncLog2LongToDouble(col 2) -> 13:double) -> 14:double, ConstantVectorExpression(val 0.0) -> 13:double) -> 15:double
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 146792 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ctinyint is null (type: boolean)
+                    Statistics: Num rows: 3115 Data size: 37224 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: cdouble (type: double), cint (type: int), COALESCE(null,(cdouble + log2(cint)),0) (type: double)
+                      outputColumnNames: _col1, _col2, _col3
+                      Statistics: Num rows: 3115 Data size: 52844 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: double), _col2 (type: int), _col3 (type: double)
+                        sort order: +++
+                        Statistics: Num rows: 3115 Data size: 52844 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [3, 0, 1, 2]
-                    selectExpressions: ConstantVectorExpression(val null) -> 3:tinyint
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
+              Select Operator
+                expressions: null (type: tinyint), KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 3115 Data size: 27928 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
 FROM alltypesorc
@@ -214,22 +188,18 @@ NULL	NULL	-850295959	0.0
 NULL	NULL	-886426182	0.0
 NULL	NULL	-899422227	0.0
 NULL	NULL	-971543377	0.0
-PREHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
+PREHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
+POSTHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -237,61 +207,50 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNull(col 4) -> boolean, SelectColumnIsNull(col 3) -> boolean) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (cfloat is null and cbigint is null) (type: boolean)
+                    Statistics: Num rows: 790 Data size: 7092 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 790 Data size: 3172 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 790 Data size: 3172 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
-                    selectExpressions: ConstantVectorExpression(val null) -> 0:float, ConstantVectorExpression(val null) -> 1:bigint, ConstantVectorExpression(val 0.0) -> 2:double
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
+              Select Operator
+                expressions: null (type: float), null (type: bigint), 0.0 (type: float)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 790 Data size: 3172 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
 FROM alltypesorc
@@ -319,22 +278,18 @@ NULL	NULL	0.0
 NULL	NULL	0.0
 NULL	NULL	0.0
 NULL	NULL	0.0
-PREHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
+PREHOOK: query: EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
 FROM alltypesorc 
 WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL
 ORDER BY ctimestamp1, ctimestamp2, c
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
+POSTHOOK: query: EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
 FROM alltypesorc 
 WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL
 ORDER BY ctimestamp1, ctimestamp2, c
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -342,61 +297,53 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: SelectColumnIsNotNull(col 8) -> boolean, SelectColumnIsNotNull(col 9) -> boolean) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 9, 12]
-                          selectExpressions: VectorCoalesce(columns [8, 9])(children: col 8, col 9) -> 12:timestamp
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 983040 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (ctimestamp1 is not null or ctimestamp2 is not null) (type: boolean)
+                    Statistics: Num rows: 12288 Data size: 983040 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), COALESCE(ctimestamp1,ctimestamp2) (type: timestamp)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 12288 Data size: 1474560 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: timestamp)
+                        sort order: +++
+                        Statistics: Num rows: 12288 Data size: 1474560 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey1 (type: timestamp), KEY.reducesinkkey2 (type: timestamp)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 12288 Data size: 1474560 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
 FROM alltypesorc 
@@ -424,22 +371,18 @@ NULL	1969-12-31 15:59:43.684	1969-12-31 15:59:43.684
 NULL	1969-12-31 15:59:43.703	1969-12-31 15:59:43.703
 NULL	1969-12-31 15:59:43.704	1969-12-31 15:59:43.704
 NULL	1969-12-31 15:59:43.709	1969-12-31 15:59:43.709
-PREHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
+PREHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
+POSTHOOK: query: EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -447,61 +390,50 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNull(col 4) -> boolean, SelectColumnIsNull(col 3) -> boolean) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (cfloat is null and cbigint is null) (type: boolean)
+                    Statistics: Num rows: 790 Data size: 7092 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 790 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 790 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
-                    selectExpressions: ConstantVectorExpression(val null) -> 0:float, ConstantVectorExpression(val null) -> 1:bigint, ConstantVectorExpression(val null) -> 2:float
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
+              Select Operator
+                expressions: null (type: float), null (type: bigint), null (type: float)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 790 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
 FROM alltypesorc
@@ -529,61 +461,34 @@ NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
 NULL	NULL	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
+PREHOOK: query: EXPLAIN SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
 FROM alltypesorc
 WHERE cbigint IS NULL
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
+POSTHOOK: query: EXPLAIN SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
 FROM alltypesorc
 WHERE cbigint IS NULL
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNull(col 3) -> boolean
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [12, 0, 14]
-                          selectExpressions: ConstantVectorExpression(val null) -> 12:bigint, VectorCoalesce(columns [13, 0])(children: ConstantVectorExpression(val null) -> 13:tinyint, col 0) -> 14:tinyint
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
+      limit: 10
+      Processor Tree:
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: cbigint is null (type: boolean)
+            Select Operator
+              expressions: null (type: bigint), ctinyint (type: tinyint), COALESCE(null,ctinyint) (type: tinyint)
+              outputColumnNames: _col0, _col1, _col2
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
 FROM alltypesorc

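For reference, the clause being stripped out here comes from HIVE-11394, which extends EXPLAIN with an optional VECTORIZATION clause (per that JIRA, EXPLAIN VECTORIZATION [ONLY] [SUMMARY|OPERATOR|EXPRESSION|DETAIL]). A minimal sketch of the two forms that appear verbatim in the reverted queries above:

EXPLAIN VECTORIZATION EXPRESSION        -- full plan annotated with expression-level detail
SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
FROM alltypesorc
WHERE cbigint IS NULL
LIMIT 10;

EXPLAIN VECTORIZATION ONLY EXPRESSION   -- ONLY suppresses the non-vectorization plan text
SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
FROM alltypesorc
WHERE cbigint IS NULL
LIMIT 10;
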
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
index 67f21d6..b390bfd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
@@ -16,22 +16,18 @@ POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@str_str_orc
 POSTHOOK: Lineage: str_str_orc.str1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: str_str_orc.str2 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
 GROUP BY str2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
 GROUP BY str2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -110,18 +106,14 @@ POSTHOOK: Input: default@str_str_orc
 #### A masked pattern was here ####
 X	0.02
 y	0.0
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -170,22 +162,18 @@ POSTHOOK: Input: default@str_str_orc
 0
 1
 0
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
 GROUP BY str2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
 GROUP BY str2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -203,27 +191,12 @@ STAGE PLANS:
                 TableScan
                   alias: str_str_orc
                   Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: str2 (type: string), UDFToInteger(COALESCE(str1,0)) (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 4]
-                        selectExpressions: VectorUDFAdaptor(UDFToInteger(COALESCE(str1,0)))(children: VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) -> 2:string) -> 3:string) -> 4:Long
                     Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 4) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 1
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -232,41 +205,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -274,17 +221,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0), 2) (type: double)
                   outputColumnNames: _col0, _col1
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [0, 2]
-                      selectExpressions: RoundWithNumDigitsDoubleToDouble(col 3, decimalPlaces 2)(children: DoubleColDivideDoubleScalar(col 2, val 60.0)(children: CastLongToDouble(col 1) -> 2:double) -> 3:double) -> 2:double
                   Statistics: Num rows: 2 Data size: 255 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 2 Data size: 255 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -313,18 +252,14 @@ POSTHOOK: Input: default@str_str_orc
 #### A masked pattern was here ####
 X	0.02
 y	0.0
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -339,23 +274,12 @@ STAGE PLANS:
                 TableScan
                   alias: str_str_orc
                   Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: COALESCE(str1,0) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
-                        selectExpressions: VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) -> 2:string) -> 3:string
                     Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -363,14 +287,6 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

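A note on the vector_coalesce_2 plans above: in COALESCE(str1, 0) the constant 0 is coerced to the string '0', so VectorCoalesce itself runs natively, and it is the outer UDFToInteger cast that needed VectorUDFAdaptor (hence usesVectorUDFAdaptor: true in the removed Map Vectorization block). A self-contained sketch of the same shape; the inserted rows are illustrative, chosen only to reproduce the X/0.02 and y/0.0 results shown above:

CREATE TABLE str_str_orc (str1 string, str2 string) STORED AS ORC;
INSERT INTO str_str_orc VALUES (NULL, 'X'), ('1', 'X'), ('0', 'y'), (NULL, 'y');

-- The string COALESCE vectorizes natively; the cast to int falls back
-- to the row-mode UDF via VectorUDFAdaptor.
SELECT str2, ROUND(SUM(CAST(COALESCE(str1, 0) AS int)) / 60, 2) AS result
FROM str_str_orc
GROUP BY str2;
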
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
index 086b9ef..08d49bc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
@@ -62,18 +62,14 @@ POSTHOOK: Lineage: orc_create_complex.str SIMPLE [(orc_create_staging)orc_create
 POSTHOOK: Lineage: orc_create_complex.strct SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:strct, type:struct<A:string,B:string>, comment:null), ]
 orc_create_staging.str	orc_create_staging.mp	orc_create_staging.lst	orc_create_staging.strct
 PREHOOK: query: -- Since complex types are not supported, this query should not vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT * FROM orc_create_complex
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Since complex types are not supported, this query should not vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT * FROM orc_create_complex
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -101,12 +97,6 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type map<string,string> of Column[mp] not supported
-                vectorized: false
 
   Stage: Stage-0
     Fetch Operator
@@ -127,18 +117,14 @@ line1	{"key13":"value13","key11":"value11","key12":"value12"}	["a","b","c"]	{"a"
 line2	{"key21":"value21","key22":"value22","key23":"value23"}	["d","e","f"]	{"a":"three","b":"four"}
 line3	{"key31":"value31","key32":"value32","key33":"value33"}	["g","h","i"]	{"a":"five","b":"six"}
 PREHOOK: query: -- However, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT COUNT(*) FROM orc_create_complex
 PREHOOK: type: QUERY
 POSTHOOK: query: -- However, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT COUNT(*) FROM orc_create_complex
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -156,71 +142,29 @@ STAGE PLANS:
                 TableScan
                   alias: orc_create_complex
                   Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: []
                     Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -244,18 +188,14 @@ POSTHOOK: Input: default@orc_create_complex
 c0
 3
 PREHOOK: query: -- Also, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT str FROM orc_create_complex ORDER BY str
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Also, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT str FROM orc_create_complex ORDER BY str
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -273,59 +213,25 @@ STAGE PLANS:
                 TableScan
                   alias: orc_create_complex
                   Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: str (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 3 Data size: 3177 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

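The three vector_complex_all plans above show the boundary precisely: a table may contain complex columns, but any SELECT expression that references one disables vectorization for that operator, while queries that never materialize the complex columns still vectorize. A compact sketch; the DDL is an assumption reconstructed from the lineage and row output above:

CREATE TABLE orc_create_complex (
  str string,
  mp map<string,string>,
  lst array<string>,
  strct struct<A:string,B:string>
) STORED AS ORC;

EXPLAIN SELECT * FROM orc_create_complex;                 -- references mp/lst/strct: not vectorized
EXPLAIN SELECT COUNT(*) FROM orc_create_complex;          -- touches no complex column: vectorized
EXPLAIN SELECT str FROM orc_create_complex ORDER BY str;  -- likewise vectorized
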
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
index 99fd25f..97d5642 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
@@ -21,17 +21,13 @@ POSTHOOK: Output: default@test
 POSTHOOK: Lineage: test.a SIMPLE []
 POSTHOOK: Lineage: test.b EXPRESSION []
 c0	c1
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from alltypesorc join test where alltypesorc.cint=test.a
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from alltypesorc join test where alltypesorc.cint=test.a
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -75,12 +71,6 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Small Table expression for MAPJOIN operator: Data type map<int,string> of Column[_col1] not supported
-                vectorized: false
         Map 2 
             Map Operator Tree:
                 TableScan
@@ -101,12 +91,6 @@ STAGE PLANS:
                         value expressions: _col1 (type: map<int,string>)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type map<int,string> of Column[b] not supported
-                vectorized: false
 
   Stage: Stage-0
     Fetch Operator
@@ -162,17 +146,13 @@ POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@test2b
 POSTHOOK: Lineage: test2b.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select *  from test2b join test2a on test2b.a = test2a.a[1]
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select *  from test2b join test2a on test2b.a = test2a.a[1]
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -216,12 +196,6 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Small Table expression for MAPJOIN operator: Data type array<int> of Column[a] not supported
-                vectorized: false
         Map 2 
             Map Operator Tree:
                 TableScan
@@ -238,12 +212,6 @@ STAGE PLANS:
                       value expressions: a (type: array<int>)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Predicate expression for FILTER operator: UDF GenericUDFIndex(Column[a], Const int 1) not supported
-                vectorized: false
 
   Stage: Stage-0
     Fetch Operator

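Both vector_complex_join cases fail to vectorize for complex-type reasons, but on different operators: the map<int,string> column disqualifies the MAPJOIN small-table expression and the SELECT, while the array subscript a[1] surfaces as an unsupported GenericUDFIndex filter predicate. The queries, verbatim from the test (column types inferred from the plan output above):

-- test(a int, b map<int,string>): the map value blocks both map-join sides.
select * from alltypesorc join test where alltypesorc.cint=test.a;

-- test2a(a array<int>): indexing into the array is not a vectorizable predicate.
select * from test2b join test2a on test2b.a = test2a.a[1];
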
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count.q.out b/ql/src/test/results/clientpositive/llap/vector_count.q.out
index c425d8f..3b9d9f9 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count.q.out
@@ -47,14 +47,10 @@ POSTHOOK: Input: default@abcd
 12	100	75	7
 12	NULL	80	2
 NULL	35	23	6
-PREHOOK: query: explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -72,26 +68,12 @@ STAGE PLANS:
                 TableScan
                   alias: abcd
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3]
                     Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 2) -> bigint, VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 1, col 2
-                          native: false
-                          projectedOutputColumns: [0, 1, 2]
                       keys: a (type: int), b (type: int), c (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
@@ -100,30 +82,12 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                         sort order: +++
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false, No DISTINCT columns IS false
                         Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col5 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: GROUPBY operator: DISTINCT not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col2)
@@ -157,14 +121,10 @@ POSTHOOK: Input: default@abcd
 100	1	1	3
 12	1	2	9
 NULL	1	1	6
-PREHOOK: query: explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -182,26 +142,12 @@ STAGE PLANS:
                 TableScan
                   alias: abcd
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3]
                     Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1), count(), count(_col1), count(_col2), count(_col3), count(_col4), count(DISTINCT _col1), count(DISTINCT _col2), count(DISTINCT _col3), count(DISTINCT _col4), count(DISTINCT _col1, _col2), count(DISTINCT _col2, _col3), count(DISTINCT _col3, _col4), count(DISTINCT _col1, _col4), count(DISTINCT _col1, _col3), count(DISTINCT _col2, _col4), count(DISTINCT _col1, _col2, _col3), count(DISTINCT _col2, _col3, _col4), count(DISTINCT _col1, _col3, _col4), count(DISTINCT _col1, _col2, _col4), count(DISTINCT _col1, _col2, _col3, _col4)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 4:long) -> bigint, VectorUDAFCountStar(*) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 2) -> bigint, VectorUDAFCount(col 3) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 2) -> bigint, VectorUDAFCount(col 3) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 2) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 1) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCount(col 0) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 1, col 2, col 3
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
                       keys: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
@@ -209,30 +155,12 @@ STAGE PLANS:
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
                         sort order: ++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false, No DISTINCT columns IS false
                         Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: GROUPBY operator: DISTINCT not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
@@ -262,14 +190,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@abcd
 #### A masked pattern was here ####
 7	7	6	6	6	7	3	3	6	7	4	5	6	6	5	6	4	5	5	5	4
-PREHOOK: query: explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -287,45 +211,20 @@ STAGE PLANS:
                 TableScan
                   alias: abcd
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3]
                     Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int), b (type: int), c (type: int)
                       sort order: +++
                       Map-reduce partition columns: a (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false, No DISTINCT columns IS false
                       Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: GROUPBY operator: DISTINCT not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col0)
@@ -359,14 +258,10 @@ POSTHOOK: Input: default@abcd
 100	1	1	3
 12	1	2	9
 NULL	1	1	6
-PREHOOK: query: explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -384,43 +279,18 @@ STAGE PLANS:
                 TableScan
                   alias: abcd
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3]
                     Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int)
                       sort order: ++++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false, No DISTINCT columns IS false
                       Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: GROUPBY operator: DISTINCT not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)


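The common thread in the four vector_count plans above: the map side remains vectorized, but any count(DISTINCT ...) forces the reducer back to row mode (notVectorizedReason: GROUPBY operator: DISTINCT not supported). For a single distinct column, a two-phase rewrite removes the DISTINCT aggregate entirely; this equivalence is illustrative and is not a form the test itself exercises:

-- Original shape: Reducer 2 is not vectorized.
select a, count(distinct b) from abcd group by a;

-- Two-phase rewrite: deduplicate (a, b) first, then count(b),
-- which skips NULLs exactly as count(distinct b) does.
select a, count(b)
from (select a, b from abcd group by a, b) t
group by a;
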
[53/62] hive git commit: HIVE-14940: MiniTezCliDriver - switch back to SQL metastore as default (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by we...@apache.org.
HIVE-14940: MiniTezCliDriver - switch back to SQL metastore as default (Prasanth Jayachandran reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4b7f373e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4b7f373e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4b7f373e

Branch: refs/heads/hive-14535
Commit: 4b7f373e58a222cc2bd83ea28b916009d7ebf75b
Parents: 3bab49a
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Mon Oct 17 21:06:56 2016 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Mon Oct 17 21:06:56 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  5 +-
 .../hadoop/hive/cli/control/CliConfigs.java     |  2 +-
 ...umn_names_with_leading_and_trailing_spaces.q | 15 -----
 .../clientpositive/tez/explainanalyze_5.q.out   |  6 +-
 ...names_with_leading_and_trailing_spaces.q.out | 65 --------------------
 .../clientpositive/tez/unionDistinct_2.q.out    |  6 +-
 .../tez/vector_join_part_col_char.q.out         | 40 ++++++------
 7 files changed, 29 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 97e310d..8aee7f5 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -26,7 +26,8 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
   ql_rewrite_gbtoidx_cbo_1.q,\
   ql_rewrite_gbtoidx_cbo_2.q,\
   rcfile_merge1.q,\
-  smb_mapjoin_8.q
+  smb_mapjoin_8.q,\
+  stats_filemetadata.q
 
 # NOTE: Add tests to minitez only if it is very
 # specific to tez and cannot be added to minillap.
@@ -50,8 +51,6 @@ minitez.query.files=explainuser_3.q,\
   explainanalyze_5.q,\
   hybridgrace_hashjoin_1.q,\
   hybridgrace_hashjoin_2.q,\
-  partition_column_names_with_leading_and_trailing_spaces.q,\
-  stats_filemetadata.q,\
   tez_union_with_udf.q
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 366c7b4..c5e027b 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -129,7 +129,7 @@ public class CliConfigs {
 
         setHiveConfDir("data/conf/tez");
         setClusterType(MiniClusterType.tez);
-        setMetastoreType(MetastoreType.hbase);
+        setMetastoreType(MetastoreType.sql);
         setFsType(QTestUtil.FsType.hdfs);
       } catch (Exception e) {
         throw new RuntimeException("can't construct cliconfig", e);

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/ql/src/test/queries/clientpositive/partition_column_names_with_leading_and_trailing_spaces.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_column_names_with_leading_and_trailing_spaces.q b/ql/src/test/queries/clientpositive/partition_column_names_with_leading_and_trailing_spaces.q
deleted file mode 100644
index f087130..0000000
--- a/ql/src/test/queries/clientpositive/partition_column_names_with_leading_and_trailing_spaces.q
+++ /dev/null
@@ -1,15 +0,0 @@
-set hive.mapred.mode=nonstrict;
-
-create table foo (d string);
-
-create table foo_p (d string) partitioned by (p string);
-
-insert into foo values ("1");
-
-insert into foo_p partition (p="a ") select foo.d from foo;
-
-insert into foo_p partition (p="a") select foo.d from foo;
-
-select * from foo_p where p="a ";
-
-select * from foo_p where p="a";

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
index 7da21db..ff04e98 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
@@ -308,14 +308,14 @@ Stage-3
               Reducer 2
               File Output Operator [FS_8]
                 table:{"name:":"default.acid_uami"}
-                Select Operator [SEL_4] (rows=6/2 width=302)
+                Select Operator [SEL_4] (rows=8/2 width=302)
                   Output:["_col0","_col1","_col2","_col3"]
                 <-Map 1 [SIMPLE_EDGE]
                   SHUFFLE [RS_3]
                     PartitionCols:UDFToInteger(_col0)
-                    Select Operator [SEL_2] (rows=6/2 width=302)
+                    Select Operator [SEL_2] (rows=8/2 width=302)
                       Output:["_col0","_col1","_col3"]
-                      Filter Operator [FIL_9] (rows=6/2 width=226)
+                      Filter Operator [FIL_9] (rows=8/2 width=226)
                         predicate:((de = 109.23) or (de = 119.23))
                         TableScan [TS_0] (rows=8/4 width=226)
                           default@acid_uami,acid_uami, ACID table,Tbl:COMPLETE,Col:COMPLETE,Output:["i","de","vc"]
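
For reference, the rows=N/M annotation in EXPLAIN ANALYZE output pairs the
optimizer's row-count estimate (N) with the row count observed at run time
(M). Only the estimates change in this hunk (6 -> 8); the observed counts (2
at these operators, 4 at the TableScan) are unchanged.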

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out b/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
deleted file mode 100644
index 92fdbe1..0000000
--- a/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
+++ /dev/null
@@ -1,65 +0,0 @@
-PREHOOK: query: create table foo (d string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@foo
-POSTHOOK: query: create table foo (d string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@foo
-PREHOOK: query: create table foo_p (d string) partitioned by (p string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@foo_p
-POSTHOOK: query: create table foo_p (d string) partitioned by (p string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@foo_p
-PREHOOK: query: insert into foo values ("1")
-PREHOOK: type: QUERY
-PREHOOK: Input: default@values__tmp__table__1
-PREHOOK: Output: default@foo
-POSTHOOK: query: insert into foo values ("1")
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@values__tmp__table__1
-POSTHOOK: Output: default@foo
-POSTHOOK: Lineage: foo.d SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: insert into foo_p partition (p="a ") select foo.d from foo
-PREHOOK: type: QUERY
-PREHOOK: Input: default@foo
-PREHOOK: Output: default@foo_p@p=a 
-POSTHOOK: query: insert into foo_p partition (p="a ") select foo.d from foo
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@foo
-POSTHOOK: Output: default@foo_p@p=a 
-POSTHOOK: Lineage: foo_p PARTITION(p=a ).d SIMPLE [(foo)foo.FieldSchema(name:d, type:string, comment:null), ]
-PREHOOK: query: insert into foo_p partition (p="a") select foo.d from foo
-PREHOOK: type: QUERY
-PREHOOK: Input: default@foo
-PREHOOK: Output: default@foo_p@p=a
-POSTHOOK: query: insert into foo_p partition (p="a") select foo.d from foo
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@foo
-POSTHOOK: Output: default@foo_p@p=a
-POSTHOOK: Lineage: foo_p PARTITION(p=a).d SIMPLE [(foo)foo.FieldSchema(name:d, type:string, comment:null), ]
-PREHOOK: query: select * from foo_p where p="a "
-PREHOOK: type: QUERY
-PREHOOK: Input: default@foo_p
-PREHOOK: Input: default@foo_p@p=a 
-#### A masked pattern was here ####
-POSTHOOK: query: select * from foo_p where p="a "
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@foo_p
-POSTHOOK: Input: default@foo_p@p=a 
-#### A masked pattern was here ####
-1	a
-PREHOOK: query: select * from foo_p where p="a"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@foo_p
-PREHOOK: Input: default@foo_p@p=a
-#### A masked pattern was here ####
-POSTHOOK: query: select * from foo_p where p="a"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@foo_p
-POSTHOOK: Input: default@foo_p@p=a
-#### A masked pattern was here ####
-1	a

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out b/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
index bd4ca72..304d74f 100644
--- a/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
@@ -365,7 +365,7 @@ union
 select `u2`.`key`, `u2`.`value` from `default`.`u2`	 	 
 union all	 	 
 select `u3`.`key` as `key`, `u3`.`value` from `default`.`u3`	 	 
-) `tab`, tableType:VIRTUAL_VIEW, privileges:PrincipalPrivilegeSet(userPrivileges:{hive_test_user=[PrivilegeGrantInfo(privilege:INSERT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:SELECT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:UPDATE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:DELETE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true)]}, groupPrivileges:null, rolePrivileges:null))		 
+) `tab`, tableType:VIRTUAL_VIEW)		 
 PREHOOK: query: select * from v
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1
@@ -435,7 +435,7 @@ select distinct * from u2
 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1`	 	 
 union  	 	 
 select distinct `u2`.`key`, `u2`.`value` from `default`.`u2`	 	 
-) `tab`, tableType:VIRTUAL_VIEW, privileges:PrincipalPrivilegeSet(userPrivileges:{hive_test_user=[PrivilegeGrantInfo(privilege:INSERT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:SELECT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:UPDATE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:DELETE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true)]}, groupPrivileges:null, rolePrivileges:null))		 
+) `tab`, tableType:VIRTUAL_VIEW)		 
 PREHOOK: query: select * from v
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1
@@ -500,7 +500,7 @@ select distinct * from u2
 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1`	 	 
 union all 	 	 
 select distinct `u2`.`key`, `u2`.`value` from `default`.`u2`	 	 
-) `tab`, tableType:VIRTUAL_VIEW, privileges:PrincipalPrivilegeSet(userPrivileges:{hive_test_user=[PrivilegeGrantInfo(privilege:INSERT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:SELECT, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:UPDATE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true), PrivilegeGrantInfo(privilege:DELETE, createTime:-1, grantor:hive_test_user, grantorType:USER, grantOption:true)]}, groupPrivileges:null, rolePrivileges:null))		 
+) `tab`, tableType:VIRTUAL_VIEW)		 
 PREHOOK: query: select * from v
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1

http://git-wip-us.apache.org/repos/asf/hive/blob/4b7f373e/ql/src/test/results/clientpositive/tez/vector_join_part_col_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join_part_col_char.q.out b/ql/src/test/results/clientpositive/tez/vector_join_part_col_char.q.out
index 8ce85f6..13bec73 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join_part_col_char.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join_part_col_char.q.out
@@ -87,16 +87,16 @@ PREHOOK: Input: default@char_tbl1
 POSTHOOK: query: show partitions char_tbl1
 POSTHOOK: type: SHOWPARTITIONS
 POSTHOOK: Input: default@char_tbl1
-gpa=2.5
-gpa=3.5
+gpa=2.5                                               
+gpa=3.5                                               
 PREHOOK: query: show partitions char_tbl2
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@char_tbl2
 POSTHOOK: query: show partitions char_tbl2
 POSTHOOK: type: SHOWPARTITIONS
 POSTHOOK: Input: default@char_tbl2
-gpa=3
-gpa=3.5
+gpa=3    
+gpa=3.5  
 PREHOOK: query: explain select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
@@ -138,38 +138,38 @@ Stage-0
 PREHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@char_tbl1
-PREHOOK: Input: default@char_tbl1@gpa=2.5
-PREHOOK: Input: default@char_tbl1@gpa=3.5
+PREHOOK: Input: default@char_tbl1@gpa=2.5                                               
+PREHOOK: Input: default@char_tbl1@gpa=3.5                                               
 PREHOOK: Input: default@char_tbl2
-PREHOOK: Input: default@char_tbl2@gpa=3
-PREHOOK: Input: default@char_tbl2@gpa=3.5
+PREHOOK: Input: default@char_tbl2@gpa=3    
+PREHOOK: Input: default@char_tbl2@gpa=3.5  
 #### A masked pattern was here ####
 POSTHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@char_tbl1
-POSTHOOK: Input: default@char_tbl1@gpa=2.5
-POSTHOOK: Input: default@char_tbl1@gpa=3.5
+POSTHOOK: Input: default@char_tbl1@gpa=2.5                                               
+POSTHOOK: Input: default@char_tbl1@gpa=3.5                                               
 POSTHOOK: Input: default@char_tbl2
-POSTHOOK: Input: default@char_tbl2@gpa=3
-POSTHOOK: Input: default@char_tbl2@gpa=3.5
+POSTHOOK: Input: default@char_tbl2@gpa=3    
+POSTHOOK: Input: default@char_tbl2@gpa=3.5  
 #### A masked pattern was here ####
 alice underhill	46	3.5                                               	alice underhill	46	3.5  
 PREHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@char_tbl1
-PREHOOK: Input: default@char_tbl1@gpa=2.5
-PREHOOK: Input: default@char_tbl1@gpa=3.5
+PREHOOK: Input: default@char_tbl1@gpa=2.5                                               
+PREHOOK: Input: default@char_tbl1@gpa=3.5                                               
 PREHOOK: Input: default@char_tbl2
-PREHOOK: Input: default@char_tbl2@gpa=3
-PREHOOK: Input: default@char_tbl2@gpa=3.5
+PREHOOK: Input: default@char_tbl2@gpa=3    
+PREHOOK: Input: default@char_tbl2@gpa=3.5  
 #### A masked pattern was here ####
 POSTHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@char_tbl1
-POSTHOOK: Input: default@char_tbl1@gpa=2.5
-POSTHOOK: Input: default@char_tbl1@gpa=3.5
+POSTHOOK: Input: default@char_tbl1@gpa=2.5                                               
+POSTHOOK: Input: default@char_tbl1@gpa=3.5                                               
 POSTHOOK: Input: default@char_tbl2
-POSTHOOK: Input: default@char_tbl2@gpa=3
-POSTHOOK: Input: default@char_tbl2@gpa=3.5
+POSTHOOK: Input: default@char_tbl2@gpa=3    
+POSTHOOK: Input: default@char_tbl2@gpa=3.5  
 #### A masked pattern was here ####
 alice underhill	46	3.5                                               	alice underhill	46	3.5  
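
The trailing spaces above are intentional: gpa is a char(N) partition column,
and char values are blank-padded to their declared length, so the padded form
surfaces in partition specs and query results. Comparison, by contrast,
ignores the padding, which is why the join still matches across the two
tables' different declared lengths. A small standalone illustration with
Hive's HiveChar (the length 5 here is an assumption, not taken from the test
tables):

    import org.apache.hadoop.hive.common.type.HiveChar;

    public class CharPadDemo {
      public static void main(String[] args) {
        HiveChar gpa = new HiveChar("3.5", 5);  // char(5) semantics
        System.out.println("[" + gpa.getPaddedValue() + "]");    // [3.5  ]
        System.out.println("[" + gpa.getStrippedValue() + "]");  // [3.5]
      }
    }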


[47/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index 77b44fb..c288731 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.commons.lang.ArrayUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinaryDeserializeRead;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
@@ -65,8 +63,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
-import com.google.common.base.Preconditions;
-
 /**
  * This class is common operator class for native vectorized map join.
  *
@@ -76,43 +72,7 @@ import com.google.common.base.Preconditions;
  */
 public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implements VectorizationContextRegion {
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
-  private static final String CLASS_NAME = VectorMapJoinCommonOperator.class.getName();
-private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected abstract String getLoggingPrefix();
-
-  // For debug tracing: information about the map or reduce task, operator, operator class, etc.
-  protected transient String loggingPrefix;
-
-  protected String getLoggingPrefix(String className) {
-    if (loggingPrefix == null) {
-      initLoggingPrefix(className);
-    }
-    return loggingPrefix;
-  }
-
-  protected void initLoggingPrefix(String className) {
-    if (hconf == null) {
-      // Constructor time...
-      loggingPrefix = className;
-    } else {
-      // Determine the name of our map or reduce task for debug tracing.
-      BaseWork work = Utilities.getMapWork(hconf);
-      if (work == null) {
-        work = Utilities.getReduceWork(hconf);
-      }
-      loggingPrefix = className + " " + work.getName() + " " + getOperatorId();
-    }
-  }
-
-  //------------------------------------------------------------------------------------------------
-
-  protected VectorMapJoinDesc vectorDesc;
-
-  protected VectorMapJoinInfo vectorMapJoinInfo;
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinCommonOperator.class.getName());
 
   // Whether this operator is an outer join.
   protected boolean isOuterJoin;
@@ -128,10 +88,10 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   // a mixture of input big table columns and new scratch columns.
   protected VectorizationContext vOutContext;
 
-  // The output column projection of the vectorized row batch.  And, the type infos of the output
+  // The output column projection of the vectorized row batch.  And, the type names of the output
   // columns.
   protected int[] outputProjection;
-  protected TypeInfo[] outputTypeInfos;
+  protected String[] outputTypeNames;
 
   // These are the vectorized batch expressions for filtering, key expressions, and value
   // expressions.
@@ -141,17 +101,15 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
   // This is map of which vectorized row batch columns are the big table key columns.  Since
   // we may have key expressions that produce new scratch columns, we need a mapping.
-  // And, we have their type infos.
+  // And, we have their type names.
   protected int[] bigTableKeyColumnMap;
-  protected String[] bigTableKeyColumnNames;
-  protected TypeInfo[] bigTableKeyTypeInfos;
+  protected ArrayList<String> bigTableKeyTypeNames;
 
   // Similarly, this is map of which vectorized row batch columns are the big table value columns.
   // Since we may have value expressions that produce new scratch columns, we need a mapping.
-  // And, we have their type infos.
+  // And, we have their type names.
   protected int[] bigTableValueColumnMap;
-  protected String[] bigTableValueColumnNames;
-  protected TypeInfo[] bigTableValueTypeInfos;
+  protected ArrayList<String> bigTableValueTypeNames;
 
   // This is a mapping of which big table columns (input and key/value expressions) will be
   // part of the big table portion of the join output result.
@@ -166,8 +124,6 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   // to output batch scratch columns for the small table portion.
   protected VectorColumnSourceMapping smallTableMapping;
 
-  protected VectorColumnSourceMapping projectionMapping;
-
   // These are the output columns for the small table and the outer small table keys.
   protected int[] smallTableOutputVectorColumns;
   protected int[] bigTableOuterKeyOutputVectorColumns;
@@ -181,6 +137,9 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   // transient.
   //---------------------------------------------------------------------------
 
+  // For debug tracing: the name of the map or reduce task.
+  protected transient String taskName;
+
   // The threshold where we should use a repeating vectorized row batch optimization for
   // generating join output results.
   protected transient boolean useOverflowRepeatedThreshold;
@@ -233,9 +192,6 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
     MapJoinDesc desc = (MapJoinDesc) conf;
     this.conf = desc;
-    vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
-    vectorMapJoinInfo = vectorDesc.getVectorMapJoinInfo();
-    Preconditions.checkState(vectorMapJoinInfo != null);
 
     this.vContext = vContext;
 
@@ -254,29 +210,215 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     bigTableFilterExpressions = vContext.getVectorExpressions(filterExpressions.get(posBigTable),
         VectorExpressionDescriptor.Mode.FILTER);
 
-    bigTableKeyColumnMap = vectorMapJoinInfo.getBigTableKeyColumnMap();
-    bigTableKeyColumnNames = vectorMapJoinInfo.getBigTableKeyColumnNames();
-    bigTableKeyTypeInfos = vectorMapJoinInfo.getBigTableKeyTypeInfos();
-    bigTableKeyExpressions = vectorMapJoinInfo.getBigTableKeyExpressions();
-
-    bigTableValueColumnMap = vectorMapJoinInfo.getBigTableValueColumnMap();
-    bigTableValueColumnNames = vectorMapJoinInfo.getBigTableValueColumnNames();
-    bigTableValueTypeInfos = vectorMapJoinInfo.getBigTableValueTypeInfos();
-    bigTableValueExpressions = vectorMapJoinInfo.getBigTableValueExpressions();
-
-    bigTableRetainedMapping = vectorMapJoinInfo.getBigTableRetainedMapping();
-
-    bigTableOuterKeyMapping =  vectorMapJoinInfo.getBigTableOuterKeyMapping();
+    List<ExprNodeDesc> keyDesc = desc.getKeys().get(posBigTable);
+    bigTableKeyExpressions = vContext.getVectorExpressions(keyDesc);
+
+    // Since a key expression can be a calculation and the key will go into a scratch column,
+    // we need the mapping and type information.
+    bigTableKeyColumnMap = new int[bigTableKeyExpressions.length];
+    bigTableKeyTypeNames = new ArrayList<String>();
+    boolean onlyColumns = true;
+    for (int i = 0; i < bigTableKeyColumnMap.length; i++) {
+      VectorExpression ve = bigTableKeyExpressions[i];
+      if (!IdentityExpression.isColumnOnly(ve)) {
+        onlyColumns = false;
+      }
+      bigTableKeyTypeNames.add(keyDesc.get(i).getTypeString());
+      bigTableKeyColumnMap[i] = ve.getOutputColumn();
+    }
+    if (onlyColumns) {
+      bigTableKeyExpressions = null;
+    }
 
-    smallTableMapping = vectorMapJoinInfo.getSmallTableMapping();
+    List<ExprNodeDesc> bigTableExprs = desc.getExprs().get(posBigTable);
+    bigTableValueExpressions = vContext.getVectorExpressions(bigTableExprs);
 
-    projectionMapping = vectorMapJoinInfo.getProjectionMapping();
+    /*
+     * Similarly, we need a mapping since a value expression can be a calculation and the value
+     * will go into a scratch column.
+     */
+    bigTableValueColumnMap = new int[bigTableValueExpressions.length];
+    bigTableValueTypeNames = new ArrayList<String>();
+    onlyColumns = true;
+    for (int i = 0; i < bigTableValueColumnMap.length; i++) {
+      VectorExpression ve = bigTableValueExpressions[i];
+      if (!IdentityExpression.isColumnOnly(ve)) {
+        onlyColumns = false;
+      }
+      bigTableValueTypeNames.add(bigTableExprs.get(i).getTypeString());
+      bigTableValueColumnMap[i] = ve.getOutputColumn();
+    }
+    if (onlyColumns) {
+      bigTableValueExpressions = null;
+    }
 
     determineCommonInfo(isOuterJoin);
   }
 
   protected void determineCommonInfo(boolean isOuter) throws HiveException {
 
+    bigTableRetainedMapping = new VectorColumnOutputMapping("Big Table Retained Mapping");
+
+    bigTableOuterKeyMapping = new VectorColumnOutputMapping("Big Table Outer Key Mapping");
+
+    // The order of the fields in the LazyBinary small table value must be used, so
+    // we use the source ordering flavor for the mapping.
+    smallTableMapping = new VectorColumnSourceMapping("Small Table Mapping");
+
+    // We use a mapping object here so we can build the projection in any order and
+    // get the output columns, ordered 0 to n-1, at the end.
+    //
+    // Also, to avoid copying a big table key into the small table result area for inner joins,
+    // we reference it with the projection so there can be duplicate output columns
+    // in the projection.
+    VectorColumnSourceMapping projectionMapping = new VectorColumnSourceMapping("Projection Mapping");
+
+    /*
+     * Gather up big and small table output result information from the MapJoinDesc.
+     */
+    List<Integer> bigTableRetainList = conf.getRetainList().get(posBigTable);
+    int bigTableRetainSize = bigTableRetainList.size();
+
+    int[] smallTableIndices;
+    int smallTableIndicesSize;
+    List<ExprNodeDesc> smallTableExprs = conf.getExprs().get(posSingleVectorMapJoinSmallTable);
+    if (conf.getValueIndices() != null && conf.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) {
+      smallTableIndices = conf.getValueIndices().get(posSingleVectorMapJoinSmallTable);
+      smallTableIndicesSize = smallTableIndices.length;
+    } else {
+      smallTableIndices = null;
+      smallTableIndicesSize = 0;
+    }
+
+    List<Integer> smallTableRetainList = conf.getRetainList().get(posSingleVectorMapJoinSmallTable);
+    int smallTableRetainSize = smallTableRetainList.size();
+
+    int smallTableResultSize = 0;
+    if (smallTableIndicesSize > 0) {
+      smallTableResultSize = smallTableIndicesSize;
+    } else if (smallTableRetainSize > 0) {
+      smallTableResultSize = smallTableRetainSize;
+    }
+
+    /*
+     * Determine the big table retained mapping first so we can optimize out (with
+     * projection) copying inner join big table keys in the subsequent small table results section.
+     */
+    int nextOutputColumn = (order[0] == posBigTable ? 0 : smallTableResultSize);
+    for (int i = 0; i < bigTableRetainSize; i++) {
+
+      // Since bigTableValueExpressions may do a calculation and produce a scratch column, we
+      // need to map to the right batch column.
+
+      int retainColumn = bigTableRetainList.get(i);
+      int batchColumnIndex = bigTableValueColumnMap[retainColumn];
+      String typeName = bigTableValueTypeNames.get(i);
+
+      // With this map we project the big table batch to make it look like an output batch.
+      projectionMapping.add(nextOutputColumn, batchColumnIndex, typeName);
+
+      // Collect columns we copy from the big table batch to the overflow batch.
+      if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
+        // Tolerate repeated use of a big table column.
+        bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName);
+      }
+
+      nextOutputColumn++;
+    }
+
+    /*
+     * Now determine the small table results.
+     */
+    int firstSmallTableOutputColumn;
+    firstSmallTableOutputColumn = (order[0] == posBigTable ? bigTableRetainSize : 0);
+    int smallTableOutputCount = 0;
+    nextOutputColumn = firstSmallTableOutputColumn;
+
+    // Small table indices carry more information (i.e. keys) than the retain list, so use them if they exist...
+    if (smallTableIndicesSize > 0) {
+      smallTableOutputCount = smallTableIndicesSize;
+
+      for (int i = 0; i < smallTableIndicesSize; i++) {
+        if (smallTableIndices[i] >= 0) {
+
+          // Entries zero and above indicate that a big table key is needed in the
+          // small table result "area".
+
+          int keyIndex = smallTableIndices[i];
+
+          // Since bigTableKeyExpressions may do a calculation and produce a scratch column, we
+          // need to map to the right batch column.
+          int batchKeyColumn = bigTableKeyColumnMap[keyIndex];
+          String typeName = bigTableKeyTypeNames.get(keyIndex);
+
+          if (!isOuter) {
+
+            // Optimize inner join keys of small table results.
+
+            // Project the big table key into the small table result "area".
+            projectionMapping.add(nextOutputColumn, batchKeyColumn, typeName);
+
+            if (!bigTableRetainedMapping.containsOutputColumn(batchKeyColumn)) {
+              // If necessary, copy the big table key into the overflow batch's small table
+              // result "area".
+              bigTableRetainedMapping.add(batchKeyColumn, batchKeyColumn, typeName);
+            }
+          } else {
+
+            // For outer joins, since the small table key can be null when there is no match,
+            // we must have a physical (scratch) column for those keys.  We cannot use the
+            // projection optimization used by inner joins above.
+
+            int scratchColumn = vOutContext.allocateScratchColumn(typeName);
+            projectionMapping.add(nextOutputColumn, scratchColumn, typeName);
+
+            bigTableRetainedMapping.add(batchKeyColumn, scratchColumn, typeName);
+
+            bigTableOuterKeyMapping.add(batchKeyColumn, scratchColumn, typeName);
+          }
+        } else {
+
+          // Negative numbers indicate a column to be deserialized and read from the small table's
+          // LazyBinary value row.
+          int smallTableValueIndex = -smallTableIndices[i] - 1;
+
+          String typeName = smallTableExprs.get(i).getTypeString();
+
+          // Make a new big table scratch column for the small table value.
+          int scratchColumn = vOutContext.allocateScratchColumn(typeName);
+          projectionMapping.add(nextOutputColumn, scratchColumn, typeName);
+
+          smallTableMapping.add(smallTableValueIndex, scratchColumn, typeName);
+        }
+        nextOutputColumn++;
+      }
+    } else if (smallTableRetainSize > 0) {
+      smallTableOutputCount = smallTableRetainSize;
+
+      // Only small table values appear in join output result.
+
+      for (int i = 0; i < smallTableRetainSize; i++) {
+        int smallTableValueIndex = smallTableRetainList.get(i);
+
+        // Make a new big table scratch column for the small table value.
+        String typeName = smallTableExprs.get(i).getTypeString();
+        int scratchColumn = vOutContext.allocateScratchColumn(typeName);
+
+        projectionMapping.add(nextOutputColumn, scratchColumn, typeName);
+
+        smallTableMapping.add(smallTableValueIndex, scratchColumn, typeName);
+        nextOutputColumn++;
+      }
+    }
+
+    // Convert dynamic arrays and maps to simple arrays.
+
+    bigTableRetainedMapping.finalize();
+
+    bigTableOuterKeyMapping.finalize();
+
+    smallTableMapping.finalize();
+
     bigTableOuterKeyOutputVectorColumns = bigTableOuterKeyMapping.getOutputColumns();
     smallTableOutputVectorColumns = smallTableMapping.getOutputColumns();
 
@@ -287,37 +429,46 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
     smallTableByteColumnVectorColumns = getByteColumnVectorColumns(smallTableMapping);
 
+    projectionMapping.finalize();
+
+    // Verify we added an entry for each output.
+    assert projectionMapping.isSourceSequenceGood();
+
     outputProjection = projectionMapping.getOutputColumns();
-    outputTypeInfos = projectionMapping.getTypeInfos();
+    outputTypeNames = projectionMapping.getTypeNames();
 
     if (isLogDebugEnabled) {
       int[] orderDisplayable = new int[order.length];
       for (int i = 0; i < order.length; i++) {
         orderDisplayable[i] = (int) order[i];
       }
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor order " + Arrays.toString(orderDisplayable));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor posBigTable " + (int) posBigTable);
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor posSingleVectorMapJoinSmallTable " + (int) posSingleVectorMapJoinSmallTable);
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor order " + Arrays.toString(orderDisplayable));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor posBigTable " + (int) posBigTable);
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor posSingleVectorMapJoinSmallTable " + (int) posSingleVectorMapJoinSmallTable);
+
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableKeyColumnMap " + Arrays.toString(bigTableKeyColumnMap));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableKeyTypeNames " + bigTableKeyTypeNames);
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableKeyColumnMap " + Arrays.toString(bigTableKeyColumnMap));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableKeyColumnNames " + Arrays.toString(bigTableKeyColumnNames));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableKeyTypeInfos " + Arrays.toString(bigTableKeyTypeInfos));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableValueColumnMap " + Arrays.toString(bigTableValueColumnMap));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableValueTypeNames " + bigTableValueTypeNames);
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableValueColumnMap " + Arrays.toString(bigTableValueColumnMap));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableValueColumnNames " + Arrays.toString(bigTableValueColumnNames));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableValueTypeNames " + Arrays.toString(bigTableValueTypeInfos));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableIndices " + Arrays.toString(smallTableIndices));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableRetainList " + smallTableRetainList);
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableRetainedMapping " + bigTableRetainedMapping.toString());
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor firstSmallTableOutputColumn " + firstSmallTableOutputColumn);
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableOutputCount " + smallTableOutputCount);
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableOuterKeyMapping " + bigTableOuterKeyMapping.toString());
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableRetainedMapping " + bigTableRetainedMapping.toString());
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor smallTableMapping " + smallTableMapping.toString());
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableOuterKeyMapping " + bigTableOuterKeyMapping.toString());
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor bigTableByteColumnVectorColumns " + Arrays.toString(bigTableByteColumnVectorColumns));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor smallTableByteColumnVectorColumns " + Arrays.toString(smallTableByteColumnVectorColumns));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableMapping " + smallTableMapping.toString());
 
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputProjection " + Arrays.toString(outputProjection));
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputTypeInfos " + Arrays.toString(outputTypeInfos));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor bigTableByteColumnVectorColumns " + Arrays.toString(bigTableByteColumnVectorColumns));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor smallTableByteColumnVectorColumns " + Arrays.toString(smallTableByteColumnVectorColumns));
+
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputProjection " + Arrays.toString(outputProjection));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputTypeNames " + Arrays.toString(outputTypeNames));
     }
 
     setupVOutContext(conf.getOutputColumnNames());
@@ -331,10 +482,10 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     ArrayList<Integer> list = new ArrayList<Integer>();
     int count = mapping.getCount();
     int[] outputColumns = mapping.getOutputColumns();
-    TypeInfo[] typeInfos = mapping.getTypeInfos();
+    String[] typeNames = mapping.getTypeNames();
     for (int i = 0; i < count; i++) {
       int outputColumn = outputColumns[i];
-      String typeName = typeInfos[i].getTypeName();
+      String typeName = typeNames[i];
       if (VectorizationContext.isStringFamily(typeName)) {
         list.add(outputColumn);
       }
@@ -349,10 +500,10 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
    */
   protected void setupVOutContext(List<String> outputColumnNames) {
     if (isLogDebugEnabled) {
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames);
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames);
     }
     if (outputColumnNames.size() != outputProjection.length) {
-      throw new RuntimeException("Output column names " + outputColumnNames + " length and output projection " + Arrays.toString(outputProjection) + " / " + Arrays.toString(outputTypeInfos) + " length mismatch");
+      throw new RuntimeException("Output column names " + outputColumnNames + " length and output projection " + Arrays.toString(outputProjection) + " / " + Arrays.toString(outputTypeNames) + " length mismatch");
     }
     vOutContext.resetProjectionColumns();
     for (int i = 0; i < outputColumnNames.size(); ++i) {
@@ -361,7 +512,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
       vOutContext.addProjectionColumn(columnName, outputColumn);
 
       if (isLogDebugEnabled) {
-        LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn);
+        LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn);
       }
     }
   }
@@ -371,7 +522,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
    */
   @Override
   protected HashTableLoader getHashTableLoader(Configuration hconf) {
-    VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) conf.getVectorDesc();
+    VectorMapJoinDesc vectorDesc = conf.getVectorDesc();
     HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType();
     HashTableLoader hashTableLoader;
     switch (vectorDesc.hashTableImplementationType()) {
@@ -395,6 +546,15 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
 
+    if (isLogDebugEnabled) {
+      // Determine the name of our map or reduce task for debug tracing.
+      BaseWork work = Utilities.getMapWork(hconf);
+      if (work == null) {
+        work = Utilities.getReduceWork(hconf);
+      }
+      taskName = work.getName();
+    }
+
     /*
      * Get configuration parameters.
      */
@@ -410,8 +570,9 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
       smallTableVectorDeserializeRow =
           new VectorDeserializeRow<LazyBinaryDeserializeRead>(
               new LazyBinaryDeserializeRead(
-                  smallTableMapping.getTypeInfos(),
-                  /* useExternalBuffer */ true));
+                  VectorizedBatchUtil.typeInfosFromTypeNames(
+                      smallTableMapping.getTypeNames()),
+                      /* useExternalBuffer */ true));
       smallTableVectorDeserializeRow.init(smallTableMapping.getOutputColumns());
     }
 
@@ -435,13 +596,13 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
     if (isLogDebugEnabled) {
       int[] currentScratchColumns = vOutContext.currentScratchColumns();
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns));
+      LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns));
 
       StructObjectInspector structOutputObjectInspector = (StructObjectInspector) outputObjInspector;
       List<? extends StructField> fields = structOutputObjectInspector.getAllStructFieldRefs();
       int i = 0;
       for (StructField field : fields) {
-        LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp " + i + " field " + field.getFieldName() + " type " + field.getFieldObjectInspector().getTypeName());
+        LOG.debug("VectorMapJoinInnerBigOnlyCommonOperator initializeOp " + i + " field " + field.getFieldName() + " type " + field.getFieldObjectInspector().getTypeName());
         i++;
       }
     }
@@ -452,7 +613,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     // setup mapJoinTables and serdes
     super.completeInitializationOp(os);
 
-    VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) conf.getVectorDesc();
+    VectorMapJoinDesc vectorDesc = conf.getVectorDesc();
     HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType();
     switch (vectorDesc.hashTableImplementationType()) {
     case OPTIMIZED:
@@ -494,7 +655,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
     // First, just allocate just the projection columns we will be using.
     for (int i = 0; i < outputProjection.length; i++) {
       int outputColumn = outputProjection[i];
-      String typeName = outputTypeInfos[i].getTypeName();
+      String typeName = outputTypeNames[i];
       allocateOverflowBatchColumnVector(overflowBatch, outputColumn, typeName);
     }
 
@@ -526,7 +687,7 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
       overflowBatch.cols[outputColumn] = VectorizedBatchUtil.createColumnVector(typeInfo);
 
       if (isLogDebugEnabled) {
-        LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName());
+        LOG.debug(taskName + ", " + getOperatorId() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName());
       }
     }
   }
@@ -563,9 +724,9 @@ private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   }
 
   protected void displayBatchColumns(VectorizedRowBatch batch, String batchName) {
-    LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator commonSetup " + batchName + " column count " + batch.numCols);
+    LOG.debug("commonSetup " + batchName + " column count " + batch.numCols);
     for (int column = 0; column < batch.numCols; column++) {
-      LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator commonSetup " + batchName + "     column " + column + " type " + (batch.cols[column] == null ? "NULL" : batch.cols[column].getClass().getSimpleName()));
+      LOG.debug("commonSetup " + batchName + "     column " + column + " type " + (batch.cols[column] == null ? "NULL" : batch.cols[column].getClass().getSimpleName()));
     }
   }
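
One detail worth calling out in determineCommonInfo() above: the
smallTableIndices array packs two kinds of references into a single int
array. Entries >= 0 index big-table key columns; negative entries encode a
small-table value column as -(valueIndex + 1), decoded by
smallTableValueIndex = -smallTableIndices[i] - 1. A tiny standalone sketch of
the decode step (the array contents are made up for illustration):

    public class SmallTableIndexDemo {
      public static void main(String[] args) {
        int[] smallTableIndices = { 0, -1, -2 };  // assumed: one key, two values
        for (int idx : smallTableIndices) {
          if (idx >= 0) {
            System.out.println("big-table key column " + idx);
          } else {
            int smallTableValueIndex = -idx - 1;  // -(-1)-1 = 0, -(-2)-1 = 1
            System.out.println("small-table value column " + smallTableValueIndex);
          }
        }
      }
    }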
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
index 43f3951..0bba141 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
@@ -45,17 +45,8 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerBigOnlyLongOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyLongOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
index 95fb0c2..621804b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
@@ -40,8 +40,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
 
-import com.google.common.base.Preconditions;
-
 /*
  * Specialized class for doing a vectorized map join that is an inner join on Multi-Key
  * and only big table columns appear in the join result so a hash multi-set is used.
@@ -50,17 +48,8 @@ import com.google.common.base.Preconditions;
 public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerBigOnlyMultiKeyOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyMultiKeyOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 
@@ -125,7 +114,7 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
 
         keyVectorSerializeWrite = new VectorSerializeRow(
                                         new BinarySortableSerializeWrite(bigTableKeyColumnMap.length));
-        keyVectorSerializeWrite.init(bigTableKeyTypeInfos, bigTableKeyColumnMap);
+        keyVectorSerializeWrite.init(bigTableKeyTypeNames, bigTableKeyColumnMap);
 
         currentKeyOutput = new Output();
         saveKeyOutput = new Output();
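
Note that this revert moves the operators from carrying TypeInfo[] back to
carrying type-name strings, converting to TypeInfo only where a deserializer
needs it (see the VectorizedBatchUtil.typeInfosFromTypeNames call in the
common-operator diff above). The round trip between the two representations
looks roughly like this; the literal type name is only an example:

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class TypeNameRoundTrip {
      public static void main(String[] args) {
        // Parse a Hive type name into its TypeInfo, then recover the name.
        TypeInfo info = TypeInfoUtils.getTypeInfoFromTypeString("decimal(10,2)");
        System.out.println(info.getTypeName());  // decimal(10,2)
      }
    }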

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
index 044e3e6..10e75ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
@@ -46,17 +46,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerBigOnlyGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerBigOnlyStringOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinInnerBigOnlyStringOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
index c85e1d8..804d69c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
@@ -44,17 +44,8 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerLongOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinInnerLongOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
index a108cd0..fcfa0bd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
@@ -39,8 +39,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
 
-import com.google.common.base.Preconditions;
-
 /*
  * Specialized class for doing a vectorized map join that is an inner join on a Multi-Key
  * using a hash map.
@@ -48,17 +46,8 @@ import com.google.common.base.Preconditions;
 public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
-  private static final String CLASS_NAME = VectorMapJoinInnerMultiKeyOperator.class.getName();
   private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerMultiKeyOperator.class.getName());
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
+  private static final String CLASS_NAME = VectorMapJoinInnerMultiKeyOperator.class.getName();
 
   // (none)
 
@@ -123,7 +112,7 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
 
         keyVectorSerializeWrite = new VectorSerializeRow(
                                         new BinarySortableSerializeWrite(bigTableKeyColumnMap.length));
-        keyVectorSerializeWrite.init(bigTableKeyTypeInfos, bigTableKeyColumnMap);
+        keyVectorSerializeWrite.init(bigTableKeyTypeNames, bigTableKeyColumnMap);
 
         currentKeyOutput = new Output();
         saveKeyOutput = new Output();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
index 3211d7d..0f9baae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
@@ -45,17 +45,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinInnerStringOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinInnerStringOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
index b02e6fd..1149a9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
@@ -45,17 +45,8 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinLeftSemiLongOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinLeftSemiLongOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
index 36b8f3f..e0baebc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
@@ -40,8 +40,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
 
-import com.google.common.base.Preconditions;
-
 /*
  * Specialized class for doing a vectorized map join that is an left semi join on Multi-Key
  * using hash set.
@@ -49,17 +47,8 @@ import com.google.common.base.Preconditions;
 public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinLeftSemiMultiKeyOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinLeftSemiMultiKeyOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 
@@ -124,7 +113,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
 
         keyVectorSerializeWrite = new VectorSerializeRow(
                                         new BinarySortableSerializeWrite(bigTableKeyColumnMap.length));
-        keyVectorSerializeWrite.init(bigTableKeyTypeInfos, bigTableKeyColumnMap);
+        keyVectorSerializeWrite.init(bigTableKeyTypeNames, bigTableKeyColumnMap);
 
         currentKeyOutput = new Output();
         saveKeyOutput = new Output();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
index 0b3de0a..49e1177 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
@@ -46,17 +46,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinLeftSemiStringOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinLeftSemiStringOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
index 72309e8..58bd0ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java
@@ -45,17 +45,8 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
  */
 public class VectorMapJoinOuterLongOperator extends VectorMapJoinOuterGenerateResultOperator {
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinOuterLongOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinOuterLongOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
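
The hunks in this part repeatedly replace a CLASS_NAME-plus-prefix logging
setup with a direct class-literal logger. Both are variations of the standard
per-class SLF4J pattern; a small sketch for orientation (the prefix helper's
exact Hive signature is not shown in these hunks, so treat that part as an
assumption):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Two common per-class logger styles: a direct class-literal logger
// (the restored style) versus routing the class name through a shared
// prefix helper (the removed style).
public class LoggerStyleSketch {

    private static final String CLASS_NAME = LoggerStyleSketch.class.getName();
    private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);

    // A prefix helper of this shape lets an operator hierarchy stamp log
    // lines with the concrete subclass name.
    protected String getLoggingPrefix(String className) {
        return className + ": ";
    }

    public static void main(String[] args) {
        LOG.info("logger named after the owning class");
    }
}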
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
index a4fc7d3..7f9afd2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java
@@ -40,8 +40,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
 import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
 
-import com.google.common.base.Preconditions;
-
 /*
  * Specialized class for doing a vectorized map join that is an outer join on Multi-Key
  * using a hash map.
@@ -49,17 +47,8 @@ import com.google.common.base.Preconditions;
 public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinOuterMultiKeyOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinOuterMultiKeyOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 
@@ -123,7 +112,7 @@ public class VectorMapJoinOuterMultiKeyOperator extends VectorMapJoinOuterGenera
 
         keyVectorSerializeWrite = new VectorSerializeRow(
                         new BinarySortableSerializeWrite(bigTableKeyColumnMap.length));
-        keyVectorSerializeWrite.init(bigTableKeyTypeInfos, bigTableKeyColumnMap);
+        keyVectorSerializeWrite.init(bigTableKeyTypeNames, bigTableKeyColumnMap);
 
         currentKeyOutput = new Output();
         saveKeyOutput = new Output();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
index 6e7e5cb..8ed1ed4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java
@@ -45,17 +45,8 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 public class VectorMapJoinOuterStringOperator extends VectorMapJoinOuterGenerateResultOperator {
 
   private static final long serialVersionUID = 1L;
-
-  //------------------------------------------------------------------------------------------------
-
+  private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinOuterStringOperator.class.getName());
   private static final String CLASS_NAME = VectorMapJoinOuterStringOperator.class.getName();
-  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
-
-  protected String getLoggingPrefix() {
-    return super.getLoggingPrefix(CLASS_NAME);
-  }
-
-  //------------------------------------------------------------------------------------------------
 
   // (none)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
index 069cc9a..9f3b107 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
@@ -94,7 +94,7 @@ public class VectorMapJoinFastTableContainer implements VectorMapJoinTableContai
   private VectorMapJoinFastHashTable createHashTable(int newThreshold) {
 
     boolean isOuterJoin = !desc.isNoOuterJoin();
-    VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
+    VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
     HashTableImplementationType hashTableImplementationType = vectorDesc.hashTableImplementationType();
     HashTableKind hashTableKind = vectorDesc.hashTableKind();
     HashTableKeyType hashTableKeyType = vectorDesc.hashTableKeyType();
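
createHashTable above dispatches on the descriptor's hash table kind and key
type to pick a specialized implementation. A pared-down sketch of that
enum-driven selection style; the enums and table classes here are stand-ins,
not Hive's real types:

// Sketch of enum-driven hash-table selection, in the spirit of
// createHashTable above.
public class HashTableFactorySketch {

    enum KeyType { LONG, STRING, MULTI_KEY }

    interface JoinHashTable { }
    static class LongHashTable implements JoinHashTable { }
    static class StringHashTable implements JoinHashTable { }
    static class BytesHashTable implements JoinHashTable { }

    static JoinHashTable create(KeyType keyType) {
        switch (keyType) {
        case LONG:
            return new LongHashTable();   // primitive long keys, no boxing
        case STRING:
            return new StringHashTable(); // byte-slice keys
        case MULTI_KEY:
            return new BytesHashTable();  // serialized composite keys
        default:
            throw new IllegalStateException("Unexpected key type: " + keyType);
        }
    }

    public static void main(String[] args) {
        System.out.println(create(KeyType.LONG).getClass().getSimpleName());
    }
}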

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java
index 111a6d2..f34b1cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java
@@ -40,7 +40,7 @@ public class VectorMapJoinOptimizedCreateHashTable {
     ReusableGetAdaptor hashMapRowGetter = mapJoinTableContainer.createGetter(refKey);
 
     boolean isOuterJoin = !desc.isNoOuterJoin();
-    VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
+    VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
     HashTableKind hashTableKind = vectorDesc.hashTableKind();
     HashTableKeyType hashTableKeyType = vectorDesc.hashTableKeyType();
     boolean minMaxEnabled = vectorDesc.minMaxEnabled();
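
The removed casts in this and the previous hunk suggest the restored
getVectorDesc() already yields the concrete descriptor type at these call
sites. One way such cast-free call sites arise is a covariant override in the
subclass; a sketch under that assumption, with stand-in classes rather than
the real descriptor hierarchy:

// Illustrative only: a covariant getter lets callers drop the
// (VectorMapJoinDesc) cast seen in the removed lines.
public class DescriptorGetterSketch {

    static class VectorDesc { }

    static class VectorMapJoinDesc extends VectorDesc {
        boolean minMaxEnabled() { return true; }
    }

    static class BaseDesc {
        private VectorDesc vectorDesc;
        void setVectorDesc(VectorDesc vd) { this.vectorDesc = vd; }
        VectorDesc getVectorDesc() { return vectorDesc; }
    }

    static class MapJoinDesc extends BaseDesc {
        // Covariant override: callers get the concrete type, no cast needed.
        @Override
        VectorMapJoinDesc getVectorDesc() { return (VectorMapJoinDesc) super.getVectorDesc(); }
    }

    public static void main(String[] args) {
        MapJoinDesc desc = new MapJoinDesc();
        desc.setVectorDesc(new VectorMapJoinDesc());
        VectorMapJoinDesc vd = desc.getVectorDesc(); // no cast at the call site
        System.out.println(vd.minMaxEnabled());
    }
}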

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
index 42ca4b7..8133aef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
@@ -160,7 +160,7 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
 
     ReduceSinkDesc desc = (ReduceSinkDesc) conf;
     this.conf = desc;
-    vectorDesc = (VectorReduceSinkDesc) desc.getVectorDesc();
+    vectorDesc = desc.getVectorDesc();
     vectorReduceSinkInfo = vectorDesc.getVectorReduceSinkInfo();
     this.vContext = vContext;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index 6806ab4..20cfb89 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -374,11 +374,6 @@ public class VectorUDFAdaptor extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return expr.getExprString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder()).build();
   }


[18/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join3.q.out
index fbd294e..5729237 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join3.q.out
@@ -226,7 +226,7 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -235,7 +235,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -244,7 +244,112 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int), cstring1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 40 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col1 (type: string)
+                          1 _col0 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 80 Data size: 640 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cstring1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
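In the plan above, Map 1 probes two broadcast hash tables in a row (cd keyed
on cint, then hd keyed on cstring1) and feeds the joined stream straight into
count(). For counting purposes a left outer join expands each left row by
max(1, number of matches), so the chained joins multiply multiplicities. A toy
model of that arithmetic with made-up data:

import java.util.HashMap;
import java.util.Map;

// Rough model of the plan above: two chained left outer joins where only
// the row count survives.
public class ChainedOuterJoinCountSketch {

    static Map<Object, Integer> multiplicity(Object[] keys) {
        Map<Object, Integer> m = new HashMap<>();
        for (Object k : keys) {
            m.merge(k, 1, Integer::sum);
        }
        return m;
    }

    public static void main(String[] args) {
        // c rows as (cint, cstring1) pairs.
        Object[][] c = { {1, "a"}, {2, "b"}, {3, "a"} };
        Map<Object, Integer> cdByInt = multiplicity(new Object[] {1, 1, 9});
        Map<Object, Integer> hdByStr = multiplicity(new Object[] {"a"});

        long count = 0;
        for (Object[] row : c) {
            long m1 = Math.max(1, cdByInt.getOrDefault(row[0], 0)); // join #1 on cint
            long m2 = Math.max(1, hdByStr.getOrDefault(row[1], 0)); // join #2 on cstring1
            count += m1 * m2;
        }
        System.out.println(count); // 2 + 1 + 1 = 4
    }
}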
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1
@@ -270,7 +375,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
 #### A masked pattern was here ####
 20
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -279,7 +384,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -288,7 +393,112 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 2939 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 2939 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: string)
+                        1 _col0 (type: string)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 21 Data size: 1869 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 42 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cstring2 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 20 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cstring1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 20 Data size: 979 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1
@@ -314,7 +524,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
 #### A masked pattern was here ####
 28
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -323,7 +533,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -332,7 +542,112 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 20 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: bigint), _col3 (type: string)
+                        1 _col0 (type: bigint), _col1 (type: string)
+                      outputColumnNames: _col0, _col2
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: int), _col2 (type: string)
+                          1 _col0 (type: int), _col1 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 2120 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cbigint (type: bigint), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 2120 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: bigint), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: bigint), _col1 (type: string)
+                      Statistics: Num rows: 20 Data size: 2120 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int), cstring1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                      Statistics: Num rows: 20 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join4.q.out
index b9b97f6..75d783f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join4.q.out
@@ -246,19 +246,82 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select * 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
   on cd.cint = c.cint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select * 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
   on cd.cint = c.cint
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 7167 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                    Statistics: Num rows: 30 Data size: 7167 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col2 (type: int)
+                        1 _col2 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 81 Data size: 49734 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 81 Data size: 49734 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 30 Data size: 7167 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                    Statistics: Num rows: 30 Data size: 7167 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col2 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: int)
+                      Statistics: Num rows: 30 Data size: 7167 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
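Unlike the count-only plans, this select * plan materializes both sides, so a
big-table row with no match is still emitted, padded with NULLs for every
small-table column. A minimal model of that padding rule:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model of left-outer-join NULL padding: every left row is emitted;
// right-side values are null when the key finds no match.
public class OuterJoinPaddingSketch {

    public static void main(String[] args) {
        Map<Integer, String> right = new HashMap<>();
        right.put(1, "r1");
        right.put(3, "r3");

        int[] leftKeys = {1, 2, 3};
        List<String> out = new ArrayList<>();
        for (int k : leftKeys) {
            String match = right.get(k); // null when unmatched
            out.add(k + " -> " + match);
        }
        System.out.println(out); // [1 -> r1, 2 -> null, 3 -> r3]
    }
}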
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select * 
@@ -331,19 +394,81 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false	NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false	NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false	NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 450 Data size: 1800 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 450 Data size: 1800 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
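A related edge case worth keeping in mind when reading the NULL-heavy results
around these plans: an equi-join key that is NULL matches nothing, even in a
self join, so such rows survive a left outer join only as NULL-padded output.
A small sketch:

import java.util.HashMap;
import java.util.Map;

// SQL equi-join keys never match on NULL, even against another NULL,
// so a left row with a NULL key comes out NULL-padded in an outer join.
public class NullKeyJoinSketch {

    public static void main(String[] args) {
        Map<Integer, String> right = new HashMap<>();
        right.put(5, "match");

        Integer[] leftKeys = {5, null};
        for (Integer k : leftKeys) {
            // Skip the probe entirely for NULL keys to mirror SQL semantics
            // (a Java HashMap, unlike SQL, would happily match null to null).
            String match = (k == null) ? null : right.get(k);
            System.out.println(k + " -> " + match);
        }
        // Prints: 5 -> match, then null -> null
    }
}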
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select c.ctinyint 
@@ -772,7 +897,7 @@ NULL
 NULL
 NULL
 NULL
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
@@ -781,7 +906,7 @@ left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
@@ -790,7 +915,112 @@ left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 30 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 81 Data size: 324 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: tinyint)
+                          1 _col0 (type: tinyint)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 1215 Data size: 9720 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 30 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.ctinyint

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join5.q.out
index eb61044..8a18738 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join5.q.out
@@ -66,21 +66,100 @@ POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6444 Data size: 51552 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
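The plan above shows the standard two-phase aggregation: the map-side Group By
with mode: hash emits one partial count per task, and Reducer 2's mode:
mergepartial sums the partials. A sketch; the per-task split is invented, and
only the 6876 total matches the query output reported below:

import java.util.Arrays;
import java.util.List;

// Two-phase count(): map tasks emit partial counts (mode: hash) and a
// single reducer sums them (mode: mergepartial).
public class TwoPhaseCountSketch {

    static long partialCount(int rowsSeenByThisTask) {
        return rowsSeenByThisTask;
    }

    static long mergePartials(List<Long> partials) {
        return partials.stream().mapToLong(Long::longValue).sum();
    }

    public static void main(String[] args) {
        List<Long> partials =
            Arrays.asList(partialCount(6058), partialCount(386), partialCount(432));
        System.out.println(mergePartials(partials)); // 6876
    }
}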
 PREHOOK: query: select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
@@ -100,21 +179,103 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6444 Data size: 51552 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
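Note the filter predicates entry above: the non-key ON condition
(s.cmodint = 2) cannot be folded into the hash lookup, so it gates each probe
instead. Because the condition lives in the ON clause rather than WHERE, a
failing row is not dropped; it is emitted NULL-padded. A small model:

import java.util.HashMap;
import java.util.Map;

// Model of an outer-join residual predicate: "on s.ctinyint = sm.ctinyint
// and s.cmodint = 2". The extra condition gates the match, never the row.
public class OuterJoinFilterPredicateSketch {

    public static void main(String[] args) {
        Map<Integer, String> small = new HashMap<>();
        small.put(7, "sm-row");

        int[][] left = { {7, 2}, {7, 3} }; // (ctinyint, cmodint)
        for (int[] row : left) {
            String match = (row[1] == 2) ? small.get(row[0]) : null; // predicate gates lookup
            System.out.println(row[0] + "," + row[1] + " -> " + match);
        }
        // 7,2 -> sm-row  (key matched and cmodint = 2)
        // 7,3 -> null    (key matched, predicate failed: NULL-padded, not dropped)
    }
}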
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -134,21 +295,103 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6058
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6444 Data size: 51552 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
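Notice that the non-equi half of the ON clause survives the map-join conversion as 'filter predicates: 0 {((UDFToInteger(_col0) pmod 4) = _col1)}' instead of being pushed below the join: for a LEFT OUTER JOIN such a predicate may only disqualify matches, never drop rows of s. A small sketch of the semantic difference, using the same tables as the test:

-- ON predicate: every s row survives; rows failing the pmod test get NULL sm columns
select count(*) from sorted_mod_4 s
left outer join small_table sm
  on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint;

-- WHERE predicate: s rows failing the pmod test are discarded outright
select count(*) from sorted_mod_4 s
left outer join small_table sm on s.ctinyint = sm.ctinyint
where pmod(s.ctinyint, 4) = s.cmodint;
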
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -168,21 +411,103 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6248
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 100)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6444 Data size: 51552 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
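As in the previous plan, the 100-row small_table is broadcast from Map 3 to Map 1 over a BROADCAST_EDGE and probed by a Map Join Operator; only the residual predicate differs ('{(_col0 < 100)}' here). A hedged sketch of the settings that drive this conversion (the size threshold below is illustrative, not taken from the test configuration):

set hive.auto.convert.join=true;                             -- consider broadcast hash joins
set hive.auto.convert.join.noconditionaltask=true;           -- convert without a conditional backup task
set hive.auto.convert.join.noconditionaltask.size=10000000;  -- illustrative byte cap for the broadcast side
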
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -202,7 +527,7 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -211,7 +536,7 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -220,7 +545,119 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 4
+                      Statistics: Num rows: 151450 Data size: 605800 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 151450 Data size: 605800 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: (_col0 pmod 8) (type: bigint)
+                      sort order: +
+                      Map-reduce partition columns: (_col0 pmod 8) (type: bigint)
+                      Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: tinyint)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: tinyint)
+                      Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                keys:
+                  0 _col0 (type: tinyint)
+                  1 _col0 (type: tinyint)
+                Statistics: Num rows: 9760469 Data size: 78083752 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
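This three-way plan mixes both join strategies: Map 1 performs a broadcast Map Join against Map 4, while the second join runs as a shuffle-side Merge Join Operator in Reducer 2, the one vertex reported as plain 'llap' rather than 'vectorized, llap' (the reduce-side merge join evidently did not vectorize here). To experiment with forcing the shuffle path, one hedged option:

set hive.auto.convert.join=false;   -- disable broadcast conversion so joins fall back to Merge Join
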
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -306,21 +743,100 @@ POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 40386 Data size: 323088 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
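Every map vertex in these plans reports 'LLAP IO: all inputs', meaning the ORC scans are served through the LLAP daemons' IO layer rather than plain task-local readers. A hedged sketch of the switches involved (standard property names; values shown only for illustration):

set hive.execution.engine=tez;    -- these DAGs ('Edges:', 'Vertices:') are Tez plans
set hive.execution.mode=llap;     -- run vertices inside LLAP daemons
set hive.llap.io.enabled=true;    -- route table scans through LLAP IO ('LLAP IO: all inputs')
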
 PREHOOK: query: select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
@@ -340,21 +856,103 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 39112
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 40386 Data size: 323088 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
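Since the join is LEFT OUTER, each of the 6,058 rows of s (per the TableScan statistics) contributes at least one output row, so count(*) can never fall below 6,058 no matter what the ON clause says; a predicate like s.cmodint = 2 merely decides whether matching sm rows fan a given s row out. The 11171 reported just below is consistent with that bound. A quick illustration:

-- a left-side-only ON predicate disables matches but never removes s rows:
select count(*)                    -- always >= select count(*) from mod_8_mod_4
from mod_8_mod_4 s
left outer join small_table2 sm
  on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2;
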
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -374,21 +972,103 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 11171
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((_col0 pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 40386 Data size: 323088 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
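Compared with the earlier tinyint variant, no UDFToInteger cast appears in the filter predicate here ('{((_col0 pmod 4) = _col1)}') because cmodtinyint and cmodint are both already int. One behavioral note on pmod itself, worth keeping in mind since these tests use it throughout: for a positive divisor it always returns a non-negative result, unlike %, e.g.:

select (-7) % 4, pmod(-7, 4);   -- returns -3 and 1 respectively
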
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -408,21 +1088,103 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 14371
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 3)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 40386 Data size: 323088 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 100 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
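The 'Column stats: COMPLETE' annotations on every operator above come from column statistics gathered earlier in the test (the ANALYZE output for small_table2 is visible further up); without them the planner would report 'Column stats: NONE' and fall back to cruder row estimates. Presumably both tables were analyzed; sketched:

analyze table mod_8_mod_4 compute statistics for columns;   -- assumption: mirrors the small_table2 step shown above
analyze table small_table2 compute statistics for columns;  -- this one appears verbatim in the test output
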
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -442,7 +1204,7 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 17792
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -451,7 +1213,7 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -460,7 +1222,119 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: COMPLETE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 4
+                      Statistics: Num rows: 151450 Data size: 605800 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 151450 Data size: 605800 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: (_col0 pmod 8) (type: bigint)
+                      sort order: +
+                      Map-reduce partition columns: (_col0 pmod 8) (type: bigint)
+                      Statistics: Num rows: 100 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 6058 Data size: 24232 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 152914016 Data size: 1223312128 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
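The Merge Join estimate of 152,914,016 rows in Reducer 2 is the planner anticipating a near-cross-product: its left input is the 151,450-row estimate from the first join, rejoined with 6,058 rows of s2 on cmodtinyint, a key which (as the table name mod_8_mod_4 suggests) holds at most 8 distinct values, so each key bucket matches thousands of rows against thousands. A quick way to see the skew driving such estimates:

select cmodtinyint, count(*)        -- at most 8 groups, each holding a large share of the 6058 rows
from mod_8_mod_4
group by cmodtinyint;
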
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
index 26fa9d9..9eeb0d6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
@@ -256,16 +256,12 @@ POSTHOOK: Input: default@flights_tiny_orc
 2010-10-29	12
 2010-10-30	11
 2010-10-31	8
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from flights_tiny_orc sort by fl_num, fl_date limit 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -284,102 +280,46 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc
                   Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: origin_city_name (type: string), dest_city_name (type: string), fl_date (type: date), fl_time (type: timestamp), arr_delay (type: float), fl_num (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                     Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col5 (type: int), _col2 (type: date)
                       sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY.reducesinkkey1 (type: date), VALUE._col2 (type: timestamp), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 1, 4, 5, 0]
                 Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col5 (type: int), _col2 (type: date)
                     sort order: ++
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                     Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY.reducesinkkey1 (type: date), VALUE._col2 (type: timestamp), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 1, 4, 5, 0]
                 Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
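This 'sort by ... limit 25' plan stages the limit twice: Reducer 2 trims each sorted stream to 25 rows and Reducer 3 produces the final 25, with the map side pre-pruning via the top-N hash noted as 'TopN Hash Memory Usage: 0.1'. That fraction is governed by a standard property (hedged; the 0.1 simply matches the plan above):

set hive.limit.pushdown.memory.usage=0.1;   -- share of task memory reserved for the top-N hash
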
@@ -425,16 +365,12 @@ Chicago	New York	2010-10-24	2010-10-24 07:00:00	113.0	897
 Chicago	New York	2010-10-25	2010-10-25 07:00:00	-1.0	897
 Chicago	New York	2010-10-26	2010-10-26 07:00:00	0.0	897
 Chicago	New York	2010-10-27	2010-10-27 07:00:00	-11.0	897
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select fl_date, count(*) from flights_tiny_orc group by fl_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select fl_date, count(*) from flights_tiny_orc group by fl_date
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -452,26 +388,12 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc
                   Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: fl_date (type: date)
                     outputColumnNames: fl_date
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 2
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: fl_date (type: date)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -480,50 +402,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: date)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: date)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: date)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 68 Data size: 19584 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 68 Data size: 19584 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -697,19 +590,17 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-29
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-30
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-31
 #### A masked pattern was here ####
-Baltimore	New York	2010-10-26 07:00:00	-22.0	1064	2010-10-26
-Baltimore	New York	2010-10-26 07:00:00	123.0	1142	2010-10-26
-Baltimore	New York	2010-10-26 07:00:00	90.0	1599	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	12.0	361	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	0.0	897	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	29.0	1531	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	-17.0	1610	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	6.0	3198	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	4.0	2630	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	-27.0	2646	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	-11.0	2662	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	13.0	3014	2010-10-26
-Washington	New York	2010-10-26 07:00:00	4.0	7291	2010-10-26
+Baltimore	New York	2010-10-20 07:00:00	-30.0	1064	2010-10-20
+Baltimore	New York	2010-10-20 07:00:00	23.0	1142	2010-10-20
+Baltimore	New York	2010-10-20 07:00:00	6.0	1599	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	42.0	361	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	24.0	897	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	15.0	1531	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	-6.0	1610	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	-2.0	3198	2010-10-20
+Cleveland	New York	2010-10-20 07:00:00	-8.0	2630	2010-10-20
+Cleveland	New York	2010-10-20 07:00:00	-15.0	3014	2010-10-20
+Washington	New York	2010-10-20 07:00:00	-2.0	7291	2010-10-20
 Baltimore	New York	2010-10-21 07:00:00	17.0	1064	2010-10-21
 Baltimore	New York	2010-10-21 07:00:00	105.0	1142	2010-10-21
 Baltimore	New York	2010-10-21 07:00:00	28.0	1599	2010-10-21
@@ -722,30 +613,17 @@ Cleveland	New York	2010-10-21 07:00:00	3.0	2630	2010-10-21
 Cleveland	New York	2010-10-21 07:00:00	29.0	2646	2010-10-21
 Cleveland	New York	2010-10-21 07:00:00	72.0	3014	2010-10-21
 Washington	New York	2010-10-21 07:00:00	22.0	7291	2010-10-21
-Baltimore	New York	2010-10-25 07:00:00	-25.0	1064	2010-10-25
-Baltimore	New York	2010-10-25 07:00:00	92.0	1142	2010-10-25
-Baltimore	New York	2010-10-25 07:00:00	106.0	1599	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	31.0	361	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	-1.0	897	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	43.0	1531	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	6.0	1610	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	-16.0	3198	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	-4.0	2630	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	81.0	2646	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	42.0	3014	2010-10-25
-Washington	New York	2010-10-25 07:00:00	9.0	7291	2010-10-25
-Baltimore	New York	2010-10-24 07:00:00	12.0	1599	2010-10-24
-Baltimore	New York	2010-10-24 07:00:00	20.0	2571	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	10.0	361	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	113.0	897	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-5.0	1531	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-17.0	1610	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-3.0	3198	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	5.0	2254	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-11.0	2630	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-20.0	2646	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-9.0	3014	2010-10-24
-Washington	New York	2010-10-24 07:00:00	-26.0	7282	2010-10-24
+Baltimore	New York	2010-10-22 07:00:00	-12.0	1064	2010-10-22
+Baltimore	New York	2010-10-22 07:00:00	54.0	1142	2010-10-22
+Baltimore	New York	2010-10-22 07:00:00	18.0	1599	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	2.0	361	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	24.0	897	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	16.0	1531	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	-6.0	1610	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	-11.0	3198	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	1.0	2630	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	-25.0	2646	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	-3.0	3014	2010-10-22
 Baltimore	New York	2010-10-23 07:00:00	18.0	272	2010-10-23
 Baltimore	New York	2010-10-23 07:00:00	-10.0	1805	2010-10-23
 Baltimore	New York	2010-10-23 07:00:00	6.0	3171	2010-10-23
@@ -758,18 +636,43 @@ Washington	New York	2010-10-23 07:00:00	-25.0	5832	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-21.0	5904	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-18.0	5917	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-16.0	7274	2010-10-23
-Baltimore	New York	2010-10-29 07:00:00	-24.0	1064	2010-10-29
-Baltimore	New York	2010-10-29 07:00:00	21.0	1142	2010-10-29
-Baltimore	New York	2010-10-29 07:00:00	-2.0	1599	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-12.0	361	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-11.0	897	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	15.0	1531	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-18.0	1610	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-4.0	3198	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-4.0	2630	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-19.0	2646	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-12.0	3014	2010-10-29
-Washington	New York	2010-10-29 07:00:00	1.0	7291	2010-10-29
+Baltimore	New York	2010-10-24 07:00:00	12.0	1599	2010-10-24
+Baltimore	New York	2010-10-24 07:00:00	20.0	2571	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	10.0	361	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	113.0	897	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-5.0	1531	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-17.0	1610	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-3.0	3198	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	5.0	2254	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-11.0	2630	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-20.0	2646	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-9.0	3014	2010-10-24
+Washington	New York	2010-10-24 07:00:00	-26.0	7282	2010-10-24
+Baltimore	New York	2010-10-25 07:00:00	-25.0	1064	2010-10-25
+Baltimore	New York	2010-10-25 07:00:00	92.0	1142	2010-10-25
+Baltimore	New York	2010-10-25 07:00:00	106.0	1599	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	31.0	361	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	-1.0	897	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	43.0	1531	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	6.0	1610	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	-16.0	3198	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	-4.0	2630	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	81.0	2646	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	42.0	3014	2010-10-25
+Washington	New York	2010-10-25 07:00:00	9.0	7291	2010-10-25
+Baltimore	New York	2010-10-26 07:00:00	-22.0	1064	2010-10-26
+Baltimore	New York	2010-10-26 07:00:00	123.0	1142	2010-10-26
+Baltimore	New York	2010-10-26 07:00:00	90.0	1599	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	12.0	361	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	0.0	897	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	29.0	1531	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	-17.0	1610	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	6.0	3198	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	4.0	2630	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	-27.0	2646	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	-11.0	2662	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	13.0	3014	2010-10-26
+Washington	New York	2010-10-26 07:00:00	4.0	7291	2010-10-26
 Baltimore	New York	2010-10-27 07:00:00	-18.0	1064	2010-10-27
 Baltimore	New York	2010-10-27 07:00:00	49.0	1142	2010-10-27
 Baltimore	New York	2010-10-27 07:00:00	92.0	1599	2010-10-27
@@ -793,6 +696,18 @@ Cleveland	New York	2010-10-28 07:00:00	3.0	2630	2010-10-28
 Cleveland	New York	2010-10-28 07:00:00	-6.0	2646	2010-10-28
 Cleveland	New York	2010-10-28 07:00:00	1.0	3014	2010-10-28
 Washington	New York	2010-10-28 07:00:00	45.0	7291	2010-10-28
+Baltimore	New York	2010-10-29 07:00:00	-24.0	1064	2010-10-29
+Baltimore	New York	2010-10-29 07:00:00	21.0	1142	2010-10-29
+Baltimore	New York	2010-10-29 07:00:00	-2.0	1599	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-12.0	361	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-11.0	897	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	15.0	1531	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-18.0	1610	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-4.0	3198	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-4.0	2630	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-19.0	2646	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-12.0	3014	2010-10-29
+Washington	New York	2010-10-29 07:00:00	1.0	7291	2010-10-29
 Baltimore	New York	2010-10-30 07:00:00	14.0	272	2010-10-30
 Baltimore	New York	2010-10-30 07:00:00	-1.0	1805	2010-10-30
 Baltimore	New York	2010-10-30 07:00:00	5.0	3171	2010-10-30
@@ -804,28 +719,6 @@ Cleveland	New York	2010-10-30 07:00:00	-23.0	2018	2010-10-30
 Cleveland	New York	2010-10-30 07:00:00	-12.0	2932	2010-10-30
 Washington	New York	2010-10-30 07:00:00	-27.0	5904	2010-10-30
 Washington	New York	2010-10-30 07:00:00	-16.0	5917	2010-10-30
-Baltimore	New York	2010-10-20 07:00:00	-30.0	1064	2010-10-20
-Baltimore	New York	2010-10-20 07:00:00	23.0	1142	2010-10-20
-Baltimore	New York	2010-10-20 07:00:00	6.0	1599	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	42.0	361	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	24.0	897	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	15.0	1531	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	-6.0	1610	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	-2.0	3198	2010-10-20
-Cleveland	New York	2010-10-20 07:00:00	-8.0	2630	2010-10-20
-Cleveland	New York	2010-10-20 07:00:00	-15.0	3014	2010-10-20
-Washington	New York	2010-10-20 07:00:00	-2.0	7291	2010-10-20
-Baltimore	New York	2010-10-22 07:00:00	-12.0	1064	2010-10-22
-Baltimore	New York	2010-10-22 07:00:00	54.0	1142	2010-10-22
-Baltimore	New York	2010-10-22 07:00:00	18.0	1599	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	2.0	361	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	24.0	897	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	16.0	1531	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	-6.0	1610	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	-11.0	3198	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	1.0	2630	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	-25.0	2646	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	-3.0	3014	2010-10-22
 Baltimore	New York	2010-10-31 07:00:00	-1.0	1599	2010-10-31
 Baltimore	New York	2010-10-31 07:00:00	-14.0	2571	2010-10-31
 Chicago	New York	2010-10-31 07:00:00	-25.0	361	2010-10-31
@@ -935,67 +828,26 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-31
 2010-10-29	12
 2010-10-30	11
 2010-10-31	8
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from flights_tiny_orc_partitioned_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from flights_tiny_orc_partitioned_date
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: flights_tiny_orc_partitioned_date
-                  Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: origin_city_name (type: string), dest_city_name (type: string), fl_time (type: timestamp), arr_delay (type: float), fl_num (type: int), fl_date (type: date)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 137 Data size: 7672 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 137 Data size: 7672 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: flights_tiny_orc_partitioned_date
+          Select Operator
+            expressions: origin_city_name (type: string), dest_city_name (type: string), fl_time (type: timestamp), arr_delay (type: float), fl_num (type: int), fl_date (type: date)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+            ListSink
 
 PREHOOK: query: select * from flights_tiny_orc_partitioned_date
 PREHOOK: type: QUERY
@@ -1029,19 +881,17 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-29
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-30
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_date@fl_date=2010-10-31
 #### A masked pattern was here ####
-Baltimore	New York	2010-10-26 07:00:00	-22.0	1064	2010-10-26
-Baltimore	New York	2010-10-26 07:00:00	123.0	1142	2010-10-26
-Baltimore	New York	2010-10-26 07:00:00	90.0	1599	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	12.0	361	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	0.0	897	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	29.0	1531	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	-17.0	1610	2010-10-26
-Chicago	New York	2010-10-26 07:00:00	6.0	3198	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	4.0	2630	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	-27.0	2646	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	-11.0	2662	2010-10-26
-Cleveland	New York	2010-10-26 07:00:00	13.0	3014	2010-10-26
-Washington	New York	2010-10-26 07:00:00	4.0	7291	2010-10-26
+Baltimore	New York	2010-10-20 07:00:00	-30.0	1064	2010-10-20
+Baltimore	New York	2010-10-20 07:00:00	23.0	1142	2010-10-20
+Baltimore	New York	2010-10-20 07:00:00	6.0	1599	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	42.0	361	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	24.0	897	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	15.0	1531	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	-6.0	1610	2010-10-20
+Chicago	New York	2010-10-20 07:00:00	-2.0	3198	2010-10-20
+Cleveland	New York	2010-10-20 07:00:00	-8.0	2630	2010-10-20
+Cleveland	New York	2010-10-20 07:00:00	-15.0	3014	2010-10-20
+Washington	New York	2010-10-20 07:00:00	-2.0	7291	2010-10-20
 Baltimore	New York	2010-10-21 07:00:00	17.0	1064	2010-10-21
 Baltimore	New York	2010-10-21 07:00:00	105.0	1142	2010-10-21
 Baltimore	New York	2010-10-21 07:00:00	28.0	1599	2010-10-21
@@ -1054,30 +904,17 @@ Cleveland	New York	2010-10-21 07:00:00	3.0	2630	2010-10-21
 Cleveland	New York	2010-10-21 07:00:00	29.0	2646	2010-10-21
 Cleveland	New York	2010-10-21 07:00:00	72.0	3014	2010-10-21
 Washington	New York	2010-10-21 07:00:00	22.0	7291	2010-10-21
-Baltimore	New York	2010-10-25 07:00:00	-25.0	1064	2010-10-25
-Baltimore	New York	2010-10-25 07:00:00	92.0	1142	2010-10-25
-Baltimore	New York	2010-10-25 07:00:00	106.0	1599	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	31.0	361	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	-1.0	897	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	43.0	1531	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	6.0	1610	2010-10-25
-Chicago	New York	2010-10-25 07:00:00	-16.0	3198	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	-4.0	2630	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	81.0	2646	2010-10-25
-Cleveland	New York	2010-10-25 07:00:00	42.0	3014	2010-10-25
-Washington	New York	2010-10-25 07:00:00	9.0	7291	2010-10-25
-Baltimore	New York	2010-10-24 07:00:00	12.0	1599	2010-10-24
-Baltimore	New York	2010-10-24 07:00:00	20.0	2571	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	10.0	361	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	113.0	897	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-5.0	1531	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-17.0	1610	2010-10-24
-Chicago	New York	2010-10-24 07:00:00	-3.0	3198	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	5.0	2254	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-11.0	2630	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-20.0	2646	2010-10-24
-Cleveland	New York	2010-10-24 07:00:00	-9.0	3014	2010-10-24
-Washington	New York	2010-10-24 07:00:00	-26.0	7282	2010-10-24
+Baltimore	New York	2010-10-22 07:00:00	-12.0	1064	2010-10-22
+Baltimore	New York	2010-10-22 07:00:00	54.0	1142	2010-10-22
+Baltimore	New York	2010-10-22 07:00:00	18.0	1599	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	2.0	361	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	24.0	897	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	16.0	1531	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	-6.0	1610	2010-10-22
+Chicago	New York	2010-10-22 07:00:00	-11.0	3198	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	1.0	2630	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	-25.0	2646	2010-10-22
+Cleveland	New York	2010-10-22 07:00:00	-3.0	3014	2010-10-22
 Baltimore	New York	2010-10-23 07:00:00	18.0	272	2010-10-23
 Baltimore	New York	2010-10-23 07:00:00	-10.0	1805	2010-10-23
 Baltimore	New York	2010-10-23 07:00:00	6.0	3171	2010-10-23
@@ -1090,41 +927,78 @@ Washington	New York	2010-10-23 07:00:00	-25.0	5832	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-21.0	5904	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-18.0	5917	2010-10-23
 Washington	New York	2010-10-23 07:00:00	-16.0	7274	2010-10-23
-Baltimore	New York	2010-10-29 07:00:00	-24.0	1064	2010-10-29
-Baltimore	New York	2010-10-29 07:00:00	21.0	1142	2010-10-29
-Baltimore	New York	2010-10-29 07:00:00	-2.0	1599	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-12.0	361	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-11.0	897	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	15.0	1531	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-18.0	1610	2010-10-29
-Chicago	New York	2010-10-29 07:00:00	-4.0	3198	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-4.0	2630	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-19.0	2646	2010-10-29
-Cleveland	New York	2010-10-29 07:00:00	-12.0	3014	2010-10-29
-Washington	New York	2010-10-29 07:00:00	1.0	7291	2010-10-29
-Baltimore	New York	2010-10-27 07:00:00	-18.0	1064	2010-10-27
-Baltimore	New York	2010-10-27 07:00:00	49.0	1142	2010-10-27
-Baltimore	New York	2010-10-27 07:00:00	92.0	1599	2010-10-27
-Chicago	New York	2010-10-27 07:00:00	148.0	361	2010-10-27
-Chicago	New York	2010-10-27 07:00:00	-11.0	897	2010-10-27
-Chicago	New York	2010-10-27 07:00:00	70.0	1531	2010-10-27
-Chicago	New York	2010-10-27 07:00:00	8.0	1610	2010-10-27
-Chicago	New York	2010-10-27 07:00:00	21.0	3198	2010-10-27
-Cleveland	New York	2010-10-27 07:00:00	16.0	2630	2010-10-27
-Cleveland	New York	2010-10-27 07:00:00	27.0	3014	2010-10-27
-Washington	New York	2010-10-27 07:00:00	26.0	7291	2010-10-27
-Baltimore	New York	2010-10-28 07:00:00	-4.0	1064	2010-10-28
-Baltimore	New York	2010-10-28 07:00:00	-14.0	1142	2010-10-28
-Baltimore	New York	2010-10-28 07:00:00	-14.0	1599	2010-10-28
-Chicago	New York	2010-10-28 07:00:00	2.0	361	2010-10-28
-Chicago	New York	2010-10-28 07:00:00	2.0	897	2010-10-28
-Chicago	New York	2010-10-28 07:00:00	-11.0	1531	2010-10-28
-Chicago	New York	2010-10-28 07:00:00	3.0	1610	2010-10-28
-Chicago	New York	2010-10-28 07:00:00	-18.0	3198	2010-10-28
-Cleveland	New York	2010-10-28 07:00:00	3.0	2630	2010-10-28
-Cleveland	New York	2010-10-28 07:00:00	-6.0	2646	2010-10-28
-Cleveland	New York	2010-10-28 07:00:00	1.0	3014	2010-10-28
+Baltimore	New York	2010-10-24 07:00:00	12.0	1599	2010-10-24
+Baltimore	New York	2010-10-24 07:00:00	20.0	2571	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	10.0	361	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	113.0	897	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-5.0	1531	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-17.0	1610	2010-10-24
+Chicago	New York	2010-10-24 07:00:00	-3.0	3198	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	5.0	2254	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-11.0	2630	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-20.0	2646	2010-10-24
+Cleveland	New York	2010-10-24 07:00:00	-9.0	3014	2010-10-24
+Washington	New York	2010-10-24 07:00:00	-26.0	7282	2010-10-24
+Baltimore	New York	2010-10-25 07:00:00	-25.0	1064	2010-10-25
+Baltimore	New York	2010-10-25 07:00:00	92.0	1142	2010-10-25
+Baltimore	New York	2010-10-25 07:00:00	106.0	1599	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	31.0	361	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	-1.0	897	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	43.0	1531	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	6.0	1610	2010-10-25
+Chicago	New York	2010-10-25 07:00:00	-16.0	3198	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	-4.0	2630	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	81.0	2646	2010-10-25
+Cleveland	New York	2010-10-25 07:00:00	42.0	3014	2010-10-25
+Washington	New York	2010-10-25 07:00:00	9.0	7291	2010-10-25
+Baltimore	New York	2010-10-26 07:00:00	-22.0	1064	2010-10-26
+Baltimore	New York	2010-10-26 07:00:00	123.0	1142	2010-10-26
+Baltimore	New York	2010-10-26 07:00:00	90.0	1599	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	12.0	361	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	0.0	897	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	29.0	1531	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	-17.0	1610	2010-10-26
+Chicago	New York	2010-10-26 07:00:00	6.0	3198	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	4.0	2630	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	-27.0	2646	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	-11.0	2662	2010-10-26
+Cleveland	New York	2010-10-26 07:00:00	13.0	3014	2010-10-26
+Washington	New York	2010-10-26 07:00:00	4.0	7291	2010-10-26
+Baltimore	New York	2010-10-27 07:00:00	-18.0	1064	2010-10-27
+Baltimore	New York	2010-10-27 07:00:00	49.0	1142	2010-10-27
+Baltimore	New York	2010-10-27 07:00:00	92.0	1599	2010-10-27
+Chicago	New York	2010-10-27 07:00:00	148.0	361	2010-10-27
+Chicago	New York	2010-10-27 07:00:00	-11.0	897	2010-10-27
+Chicago	New York	2010-10-27 07:00:00	70.0	1531	2010-10-27
+Chicago	New York	2010-10-27 07:00:00	8.0	1610	2010-10-27
+Chicago	New York	2010-10-27 07:00:00	21.0	3198	2010-10-27
+Cleveland	New York	2010-10-27 07:00:00	16.0	2630	2010-10-27
+Cleveland	New York	2010-10-27 07:00:00	27.0	3014	2010-10-27
+Washington	New York	2010-10-27 07:00:00	26.0	7291	2010-10-27
+Baltimore	New York	2010-10-28 07:00:00	-4.0	1064	2010-10-28
+Baltimore	New York	2010-10-28 07:00:00	-14.0	1142	2010-10-28
+Baltimore	New York	2010-10-28 07:00:00	-14.0	1599	2010-10-28
+Chicago	New York	2010-10-28 07:00:00	2.0	361	2010-10-28
+Chicago	New York	2010-10-28 07:00:00	2.0	897	2010-10-28
+Chicago	New York	2010-10-28 07:00:00	-11.0	1531	2010-10-28
+Chicago	New York	2010-10-28 07:00:00	3.0	1610	2010-10-28
+Chicago	New York	2010-10-28 07:00:00	-18.0	3198	2010-10-28
+Cleveland	New York	2010-10-28 07:00:00	3.0	2630	2010-10-28
+Cleveland	New York	2010-10-28 07:00:00	-6.0	2646	2010-10-28
+Cleveland	New York	2010-10-28 07:00:00	1.0	3014	2010-10-28
 Washington	New York	2010-10-28 07:00:00	45.0	7291	2010-10-28
+Baltimore	New York	2010-10-29 07:00:00	-24.0	1064	2010-10-29
+Baltimore	New York	2010-10-29 07:00:00	21.0	1142	2010-10-29
+Baltimore	New York	2010-10-29 07:00:00	-2.0	1599	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-12.0	361	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-11.0	897	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	15.0	1531	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-18.0	1610	2010-10-29
+Chicago	New York	2010-10-29 07:00:00	-4.0	3198	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-4.0	2630	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-19.0	2646	2010-10-29
+Cleveland	New York	2010-10-29 07:00:00	-12.0	3014	2010-10-29
+Washington	New York	2010-10-29 07:00:00	1.0	7291	2010-10-29
 Baltimore	New York	2010-10-30 07:00:00	14.0	272	2010-10-30
 Baltimore	New York	2010-10-30 07:00:00	-1.0	1805	2010-10-30
 Baltimore	New York	2010-10-30 07:00:00	5.0	3171	2010-10-30
@@ -1136,28 +1010,6 @@ Cleveland	New York	2010-10-30 07:00:00	-23.0	2018	2010-10-30
 Cleveland	New York	2010-10-30 07:00:00	-12.0	2932	2010-10-30
 Washington	New York	2010-10-30 07:00:00	-27.0	5904	2010-10-30
 Washington	New York	2010-10-30 07:00:00	-16.0	5917	2010-10-30
-Baltimore	New York	2010-10-20 07:00:00	-30.0	1064	2010-10-20
-Baltimore	New York	2010-10-20 07:00:00	23.0	1142	2010-10-20
-Baltimore	New York	2010-10-20 07:00:00	6.0	1599	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	42.0	361	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	24.0	897	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	15.0	1531	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	-6.0	1610	2010-10-20
-Chicago	New York	2010-10-20 07:00:00	-2.0	3198	2010-10-20
-Cleveland	New York	2010-10-20 07:00:00	-8.0	2630	2010-10-20
-Cleveland	New York	2010-10-20 07:00:00	-15.0	3014	2010-10-20
-Washington	New York	2010-10-20 07:00:00	-2.0	7291	2010-10-20
-Baltimore	New York	2010-10-22 07:00:00	-12.0	1064	2010-10-22
-Baltimore	New York	2010-10-22 07:00:00	54.0	1142	2010-10-22
-Baltimore	New York	2010-10-22 07:00:00	18.0	1599	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	2.0	361	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	24.0	897	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	16.0	1531	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	-6.0	1610	2010-10-22
-Chicago	New York	2010-10-22 07:00:00	-11.0	3198	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	1.0	2630	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	-25.0	2646	2010-10-22
-Cleveland	New York	2010-10-22 07:00:00	-3.0	3014	2010-10-22
 Baltimore	New York	2010-10-31 07:00:00	-1.0	1599	2010-10-31
 Baltimore	New York	2010-10-31 07:00:00	-14.0	2571	2010-10-31
 Chicago	New York	2010-10-31 07:00:00	-25.0	361	2010-10-31
@@ -1166,16 +1018,12 @@ Chicago	New York	2010-10-31 07:00:00	-4.0	1531	2010-10-31
 Chicago	New York	2010-10-31 07:00:00	-22.0	1610	2010-10-31
 Chicago	New York	2010-10-31 07:00:00	-15.0	3198	2010-10-31
 Washington	New York	2010-10-31 07:00:00	-18.0	7282	2010-10-31
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from flights_tiny_orc_partitioned_date sort by fl_num, fl_date limit 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1194,102 +1042,46 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc_partitioned_date
                   Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: origin_city_name (type: string), dest_city_name (type: string), fl_time (type: timestamp), arr_delay (type: float), fl_num (type: int), fl_date (type: date)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                     Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col4 (type: int), _col5 (type: date)
                       sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: timestamp), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: date)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 4, 5, 0, 1]
                 Statistics: Num rows: 137 Data size: 7672 Basic stats: COMPLETE Column stats: PARTIAL
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                   Reduce Output Operator
                     key expressions: _col4 (type: int), _col5 (type: date)
                     sort order: ++
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                     Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: timestamp), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: date)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 4, 5, 0, 1]
                 Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1359,16 +1151,12 @@ Chicago	New York	2010-10-24 07:00:00	113.0	897	2010-10-24
 Chicago	New York	2010-10-25 07:00:00	-1.0	897	2010-10-25
 Chicago	New York	2010-10-26 07:00:00	0.0	897	2010-10-26
 Chicago	New York	2010-10-27 07:00:00	-11.0	897	2010-10-27
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select fl_date, count(*) from flights_tiny_orc_partitioned_date group by fl_date
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1386,26 +1174,12 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc_partitioned_date
                   Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: fl_date (type: date)
                     outputColumnNames: fl_date
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [5]
                     Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 5
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: fl_date (type: date)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1414,50 +1188,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: date)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: date)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 768 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: date)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 12 Data size: 768 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 12 Data size: 768 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1655,19 +1400,17 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-30 07%3A00%3A00
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-31 07%3A00%3A00
 #### A masked pattern was here ####
-Baltimore	New York	2010-10-26	-22.0	1064	2010-10-26 07:00:00
-Baltimore	New York	2010-10-26	123.0	1142	2010-10-26 07:00:00
-Baltimore	New York	2010-10-26	90.0	1599	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	12.0	361	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	0.0	897	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	29.0	1531	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	-17.0	1610	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	6.0	3198	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	4.0	2630	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	-27.0	2646	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	-11.0	2662	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	13.0	3014	2010-10-26 07:00:00
-Washington	New York	2010-10-26	4.0	7291	2010-10-26 07:00:00
+Baltimore	New York	2010-10-20	-30.0	1064	2010-10-20 07:00:00
+Baltimore	New York	2010-10-20	23.0	1142	2010-10-20 07:00:00
+Baltimore	New York	2010-10-20	6.0	1599	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	42.0	361	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	24.0	897	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	15.0	1531	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	-6.0	1610	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	-2.0	3198	2010-10-20 07:00:00
+Cleveland	New York	2010-10-20	-8.0	2630	2010-10-20 07:00:00
+Cleveland	New York	2010-10-20	-15.0	3014	2010-10-20 07:00:00
+Washington	New York	2010-10-20	-2.0	7291	2010-10-20 07:00:00
 Baltimore	New York	2010-10-21	17.0	1064	2010-10-21 07:00:00
 Baltimore	New York	2010-10-21	105.0	1142	2010-10-21 07:00:00
 Baltimore	New York	2010-10-21	28.0	1599	2010-10-21 07:00:00
@@ -1680,30 +1423,17 @@ Cleveland	New York	2010-10-21	3.0	2630	2010-10-21 07:00:00
 Cleveland	New York	2010-10-21	29.0	2646	2010-10-21 07:00:00
 Cleveland	New York	2010-10-21	72.0	3014	2010-10-21 07:00:00
 Washington	New York	2010-10-21	22.0	7291	2010-10-21 07:00:00
-Baltimore	New York	2010-10-25	-25.0	1064	2010-10-25 07:00:00
-Baltimore	New York	2010-10-25	92.0	1142	2010-10-25 07:00:00
-Baltimore	New York	2010-10-25	106.0	1599	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	31.0	361	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	-1.0	897	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	43.0	1531	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	6.0	1610	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	-16.0	3198	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	-4.0	2630	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	81.0	2646	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	42.0	3014	2010-10-25 07:00:00
-Washington	New York	2010-10-25	9.0	7291	2010-10-25 07:00:00
-Baltimore	New York	2010-10-24	12.0	1599	2010-10-24 07:00:00
-Baltimore	New York	2010-10-24	20.0	2571	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	10.0	361	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	113.0	897	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-5.0	1531	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-17.0	1610	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-3.0	3198	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	5.0	2254	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-11.0	2630	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-20.0	2646	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-9.0	3014	2010-10-24 07:00:00
-Washington	New York	2010-10-24	-26.0	7282	2010-10-24 07:00:00
+Baltimore	New York	2010-10-22	-12.0	1064	2010-10-22 07:00:00
+Baltimore	New York	2010-10-22	54.0	1142	2010-10-22 07:00:00
+Baltimore	New York	2010-10-22	18.0	1599	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	2.0	361	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	24.0	897	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	16.0	1531	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	-6.0	1610	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	-11.0	3198	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	1.0	2630	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	-25.0	2646	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	-3.0	3014	2010-10-22 07:00:00
 Baltimore	New York	2010-10-23	18.0	272	2010-10-23 07:00:00
 Baltimore	New York	2010-10-23	-10.0	1805	2010-10-23 07:00:00
 Baltimore	New York	2010-10-23	6.0	3171	2010-10-23 07:00:00
@@ -1716,18 +1446,43 @@ Washington	New York	2010-10-23	-25.0	5832	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-21.0	5904	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-18.0	5917	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-16.0	7274	2010-10-23 07:00:00
-Baltimore	New York	2010-10-29	-24.0	1064	2010-10-29 07:00:00
-Baltimore	New York	2010-10-29	21.0	1142	2010-10-29 07:00:00
-Baltimore	New York	2010-10-29	-2.0	1599	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-12.0	361	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-11.0	897	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	15.0	1531	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-18.0	1610	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-4.0	3198	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-4.0	2630	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-19.0	2646	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-12.0	3014	2010-10-29 07:00:00
-Washington	New York	2010-10-29	1.0	7291	2010-10-29 07:00:00
+Baltimore	New York	2010-10-24	12.0	1599	2010-10-24 07:00:00
+Baltimore	New York	2010-10-24	20.0	2571	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	10.0	361	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	113.0	897	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-5.0	1531	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-17.0	1610	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-3.0	3198	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	5.0	2254	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-11.0	2630	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-20.0	2646	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-9.0	3014	2010-10-24 07:00:00
+Washington	New York	2010-10-24	-26.0	7282	2010-10-24 07:00:00
+Baltimore	New York	2010-10-25	-25.0	1064	2010-10-25 07:00:00
+Baltimore	New York	2010-10-25	92.0	1142	2010-10-25 07:00:00
+Baltimore	New York	2010-10-25	106.0	1599	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	31.0	361	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	-1.0	897	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	43.0	1531	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	6.0	1610	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	-16.0	3198	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	-4.0	2630	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	81.0	2646	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	42.0	3014	2010-10-25 07:00:00
+Washington	New York	2010-10-25	9.0	7291	2010-10-25 07:00:00
+Baltimore	New York	2010-10-26	-22.0	1064	2010-10-26 07:00:00
+Baltimore	New York	2010-10-26	123.0	1142	2010-10-26 07:00:00
+Baltimore	New York	2010-10-26	90.0	1599	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	12.0	361	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	0.0	897	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	29.0	1531	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	-17.0	1610	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	6.0	3198	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	4.0	2630	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	-27.0	2646	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	-11.0	2662	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	13.0	3014	2010-10-26 07:00:00
+Washington	New York	2010-10-26	4.0	7291	2010-10-26 07:00:00
 Baltimore	New York	2010-10-27	-18.0	1064	2010-10-27 07:00:00
 Baltimore	New York	2010-10-27	49.0	1142	2010-10-27 07:00:00
 Baltimore	New York	2010-10-27	92.0	1599	2010-10-27 07:00:00
@@ -1751,6 +1506,18 @@ Cleveland	New York	2010-10-28	3.0	2630	2010-10-28 07:00:00
 Cleveland	New York	2010-10-28	-6.0	2646	2010-10-28 07:00:00
 Cleveland	New York	2010-10-28	1.0	3014	2010-10-28 07:00:00
 Washington	New York	2010-10-28	45.0	7291	2010-10-28 07:00:00
+Baltimore	New York	2010-10-29	-24.0	1064	2010-10-29 07:00:00
+Baltimore	New York	2010-10-29	21.0	1142	2010-10-29 07:00:00
+Baltimore	New York	2010-10-29	-2.0	1599	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-12.0	361	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-11.0	897	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	15.0	1531	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-18.0	1610	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-4.0	3198	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-4.0	2630	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-19.0	2646	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-12.0	3014	2010-10-29 07:00:00
+Washington	New York	2010-10-29	1.0	7291	2010-10-29 07:00:00
 Baltimore	New York	2010-10-30	14.0	272	2010-10-30 07:00:00
 Baltimore	New York	2010-10-30	-1.0	1805	2010-10-30 07:00:00
 Baltimore	New York	2010-10-30	5.0	3171	2010-10-30 07:00:00
@@ -1762,28 +1529,6 @@ Cleveland	New York	2010-10-30	-23.0	2018	2010-10-30 07:00:00
 Cleveland	New York	2010-10-30	-12.0	2932	2010-10-30 07:00:00
 Washington	New York	2010-10-30	-27.0	5904	2010-10-30 07:00:00
 Washington	New York	2010-10-30	-16.0	5917	2010-10-30 07:00:00
-Baltimore	New York	2010-10-20	-30.0	1064	2010-10-20 07:00:00
-Baltimore	New York	2010-10-20	23.0	1142	2010-10-20 07:00:00
-Baltimore	New York	2010-10-20	6.0	1599	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	42.0	361	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	24.0	897	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	15.0	1531	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	-6.0	1610	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	-2.0	3198	2010-10-20 07:00:00
-Cleveland	New York	2010-10-20	-8.0	2630	2010-10-20 07:00:00
-Cleveland	New York	2010-10-20	-15.0	3014	2010-10-20 07:00:00
-Washington	New York	2010-10-20	-2.0	7291	2010-10-20 07:00:00
-Baltimore	New York	2010-10-22	-12.0	1064	2010-10-22 07:00:00
-Baltimore	New York	2010-10-22	54.0	1142	2010-10-22 07:00:00
-Baltimore	New York	2010-10-22	18.0	1599	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	2.0	361	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	24.0	897	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	16.0	1531	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	-6.0	1610	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	-11.0	3198	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	1.0	2630	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	-25.0	2646	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	-3.0	3014	2010-10-22 07:00:00
 Baltimore	New York	2010-10-31	-1.0	1599	2010-10-31 07:00:00
 Baltimore	New York	2010-10-31	-14.0	2571	2010-10-31 07:00:00
 Chicago	New York	2010-10-31	-25.0	361	2010-10-31 07:00:00
@@ -1893,67 +1638,26 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-
 2010-10-29 07:00:00	12
 2010-10-30 07:00:00	11
 2010-10-31 07:00:00	8
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from flights_tiny_orc_partitioned_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from flights_tiny_orc_partitioned_timestamp
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: flights_tiny_orc_partitioned_timestamp
-                  Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: origin_city_name (type: string), dest_city_name (type: string), fl_date (type: date), arr_delay (type: float), fl_num (type: int), fl_time (type: timestamp)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 137 Data size: 5480 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 137 Data size: 5480 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: flights_tiny_orc_partitioned_timestamp
+          Select Operator
+            expressions: origin_city_name (type: string), dest_city_name (type: string), fl_date (type: date), arr_delay (type: float), fl_num (type: int), fl_time (type: timestamp)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+            ListSink
 
 PREHOOK: query: select * from flights_tiny_orc_partitioned_timestamp
 PREHOOK: type: QUERY
@@ -1987,19 +1691,17 @@ POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-30 07%3A00%3A00
 POSTHOOK: Input: default@flights_tiny_orc_partitioned_timestamp@fl_time=2010-10-31 07%3A00%3A00
 #### A masked pattern was here ####
-Baltimore	New York	2010-10-26	-22.0	1064	2010-10-26 07:00:00
-Baltimore	New York	2010-10-26	123.0	1142	2010-10-26 07:00:00
-Baltimore	New York	2010-10-26	90.0	1599	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	12.0	361	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	0.0	897	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	29.0	1531	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	-17.0	1610	2010-10-26 07:00:00
-Chicago	New York	2010-10-26	6.0	3198	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	4.0	2630	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	-27.0	2646	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	-11.0	2662	2010-10-26 07:00:00
-Cleveland	New York	2010-10-26	13.0	3014	2010-10-26 07:00:00
-Washington	New York	2010-10-26	4.0	7291	2010-10-26 07:00:00
+Baltimore	New York	2010-10-20	-30.0	1064	2010-10-20 07:00:00
+Baltimore	New York	2010-10-20	23.0	1142	2010-10-20 07:00:00
+Baltimore	New York	2010-10-20	6.0	1599	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	42.0	361	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	24.0	897	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	15.0	1531	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	-6.0	1610	2010-10-20 07:00:00
+Chicago	New York	2010-10-20	-2.0	3198	2010-10-20 07:00:00
+Cleveland	New York	2010-10-20	-8.0	2630	2010-10-20 07:00:00
+Cleveland	New York	2010-10-20	-15.0	3014	2010-10-20 07:00:00
+Washington	New York	2010-10-20	-2.0	7291	2010-10-20 07:00:00
 Baltimore	New York	2010-10-21	17.0	1064	2010-10-21 07:00:00
 Baltimore	New York	2010-10-21	105.0	1142	2010-10-21 07:00:00
 Baltimore	New York	2010-10-21	28.0	1599	2010-10-21 07:00:00
@@ -2012,30 +1714,17 @@ Cleveland	New York	2010-10-21	3.0	2630	2010-10-21 07:00:00
 Cleveland	New York	2010-10-21	29.0	2646	2010-10-21 07:00:00
 Cleveland	New York	2010-10-21	72.0	3014	2010-10-21 07:00:00
 Washington	New York	2010-10-21	22.0	7291	2010-10-21 07:00:00
-Baltimore	New York	2010-10-25	-25.0	1064	2010-10-25 07:00:00
-Baltimore	New York	2010-10-25	92.0	1142	2010-10-25 07:00:00
-Baltimore	New York	2010-10-25	106.0	1599	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	31.0	361	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	-1.0	897	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	43.0	1531	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	6.0	1610	2010-10-25 07:00:00
-Chicago	New York	2010-10-25	-16.0	3198	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	-4.0	2630	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	81.0	2646	2010-10-25 07:00:00
-Cleveland	New York	2010-10-25	42.0	3014	2010-10-25 07:00:00
-Washington	New York	2010-10-25	9.0	7291	2010-10-25 07:00:00
-Baltimore	New York	2010-10-24	12.0	1599	2010-10-24 07:00:00
-Baltimore	New York	2010-10-24	20.0	2571	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	10.0	361	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	113.0	897	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-5.0	1531	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-17.0	1610	2010-10-24 07:00:00
-Chicago	New York	2010-10-24	-3.0	3198	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	5.0	2254	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-11.0	2630	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-20.0	2646	2010-10-24 07:00:00
-Cleveland	New York	2010-10-24	-9.0	3014	2010-10-24 07:00:00
-Washington	New York	2010-10-24	-26.0	7282	2010-10-24 07:00:00
+Baltimore	New York	2010-10-22	-12.0	1064	2010-10-22 07:00:00
+Baltimore	New York	2010-10-22	54.0	1142	2010-10-22 07:00:00
+Baltimore	New York	2010-10-22	18.0	1599	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	2.0	361	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	24.0	897	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	16.0	1531	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	-6.0	1610	2010-10-22 07:00:00
+Chicago	New York	2010-10-22	-11.0	3198	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	1.0	2630	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	-25.0	2646	2010-10-22 07:00:00
+Cleveland	New York	2010-10-22	-3.0	3014	2010-10-22 07:00:00
 Baltimore	New York	2010-10-23	18.0	272	2010-10-23 07:00:00
 Baltimore	New York	2010-10-23	-10.0	1805	2010-10-23 07:00:00
 Baltimore	New York	2010-10-23	6.0	3171	2010-10-23 07:00:00
@@ -2048,18 +1737,43 @@ Washington	New York	2010-10-23	-25.0	5832	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-21.0	5904	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-18.0	5917	2010-10-23 07:00:00
 Washington	New York	2010-10-23	-16.0	7274	2010-10-23 07:00:00
-Baltimore	New York	2010-10-29	-24.0	1064	2010-10-29 07:00:00
-Baltimore	New York	2010-10-29	21.0	1142	2010-10-29 07:00:00
-Baltimore	New York	2010-10-29	-2.0	1599	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-12.0	361	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-11.0	897	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	15.0	1531	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-18.0	1610	2010-10-29 07:00:00
-Chicago	New York	2010-10-29	-4.0	3198	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-4.0	2630	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-19.0	2646	2010-10-29 07:00:00
-Cleveland	New York	2010-10-29	-12.0	3014	2010-10-29 07:00:00
-Washington	New York	2010-10-29	1.0	7291	2010-10-29 07:00:00
+Baltimore	New York	2010-10-24	12.0	1599	2010-10-24 07:00:00
+Baltimore	New York	2010-10-24	20.0	2571	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	10.0	361	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	113.0	897	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-5.0	1531	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-17.0	1610	2010-10-24 07:00:00
+Chicago	New York	2010-10-24	-3.0	3198	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	5.0	2254	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-11.0	2630	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-20.0	2646	2010-10-24 07:00:00
+Cleveland	New York	2010-10-24	-9.0	3014	2010-10-24 07:00:00
+Washington	New York	2010-10-24	-26.0	7282	2010-10-24 07:00:00
+Baltimore	New York	2010-10-25	-25.0	1064	2010-10-25 07:00:00
+Baltimore	New York	2010-10-25	92.0	1142	2010-10-25 07:00:00
+Baltimore	New York	2010-10-25	106.0	1599	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	31.0	361	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	-1.0	897	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	43.0	1531	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	6.0	1610	2010-10-25 07:00:00
+Chicago	New York	2010-10-25	-16.0	3198	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	-4.0	2630	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	81.0	2646	2010-10-25 07:00:00
+Cleveland	New York	2010-10-25	42.0	3014	2010-10-25 07:00:00
+Washington	New York	2010-10-25	9.0	7291	2010-10-25 07:00:00
+Baltimore	New York	2010-10-26	-22.0	1064	2010-10-26 07:00:00
+Baltimore	New York	2010-10-26	123.0	1142	2010-10-26 07:00:00
+Baltimore	New York	2010-10-26	90.0	1599	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	12.0	361	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	0.0	897	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	29.0	1531	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	-17.0	1610	2010-10-26 07:00:00
+Chicago	New York	2010-10-26	6.0	3198	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	4.0	2630	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	-27.0	2646	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	-11.0	2662	2010-10-26 07:00:00
+Cleveland	New York	2010-10-26	13.0	3014	2010-10-26 07:00:00
+Washington	New York	2010-10-26	4.0	7291	2010-10-26 07:00:00
 Baltimore	New York	2010-10-27	-18.0	1064	2010-10-27 07:00:00
 Baltimore	New York	2010-10-27	49.0	1142	2010-10-27 07:00:00
 Baltimore	New York	2010-10-27	92.0	1599	2010-10-27 07:00:00
@@ -2083,6 +1797,18 @@ Cleveland	New York	2010-10-28	3.0	2630	2010-10-28 07:00:00
 Cleveland	New York	2010-10-28	-6.0	2646	2010-10-28 07:00:00
 Cleveland	New York	2010-10-28	1.0	3014	2010-10-28 07:00:00
 Washington	New York	2010-10-28	45.0	7291	2010-10-28 07:00:00
+Baltimore	New York	2010-10-29	-24.0	1064	2010-10-29 07:00:00
+Baltimore	New York	2010-10-29	21.0	1142	2010-10-29 07:00:00
+Baltimore	New York	2010-10-29	-2.0	1599	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-12.0	361	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-11.0	897	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	15.0	1531	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-18.0	1610	2010-10-29 07:00:00
+Chicago	New York	2010-10-29	-4.0	3198	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-4.0	2630	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-19.0	2646	2010-10-29 07:00:00
+Cleveland	New York	2010-10-29	-12.0	3014	2010-10-29 07:00:00
+Washington	New York	2010-10-29	1.0	7291	2010-10-29 07:00:00
 Baltimore	New York	2010-10-30	14.0	272	2010-10-30 07:00:00
 Baltimore	New York	2010-10-30	-1.0	1805	2010-10-30 07:00:00
 Baltimore	New York	2010-10-30	5.0	3171	2010-10-30 07:00:00
@@ -2094,28 +1820,6 @@ Cleveland	New York	2010-10-30	-23.0	2018	2010-10-30 07:00:00
 Cleveland	New York	2010-10-30	-12.0	2932	2010-10-30 07:00:00
 Washington	New York	2010-10-30	-27.0	5904	2010-10-30 07:00:00
 Washington	New York	2010-10-30	-16.0	5917	2010-10-30 07:00:00
-Baltimore	New York	2010-10-20	-30.0	1064	2010-10-20 07:00:00
-Baltimore	New York	2010-10-20	23.0	1142	2010-10-20 07:00:00
-Baltimore	New York	2010-10-20	6.0	1599	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	42.0	361	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	24.0	897	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	15.0	1531	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	-6.0	1610	2010-10-20 07:00:00
-Chicago	New York	2010-10-20	-2.0	3198	2010-10-20 07:00:00
-Cleveland	New York	2010-10-20	-8.0	2630	2010-10-20 07:00:00
-Cleveland	New York	2010-10-20	-15.0	3014	2010-10-20 07:00:00
-Washington	New York	2010-10-20	-2.0	7291	2010-10-20 07:00:00
-Baltimore	New York	2010-10-22	-12.0	1064	2010-10-22 07:00:00
-Baltimore	New York	2010-10-22	54.0	1142	2010-10-22 07:00:00
-Baltimore	New York	2010-10-22	18.0	1599	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	2.0	361	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	24.0	897	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	16.0	1531	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	-6.0	1610	2010-10-22 07:00:00
-Chicago	New York	2010-10-22	-11.0	3198	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	1.0	2630	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	-25.0	2646	2010-10-22 07:00:00
-Cleveland	New York	2010-10-22	-3.0	3014	2010-10-22 07:00:00
 Baltimore	New York	2010-10-31	-1.0	1599	2010-10-31 07:00:00
 Baltimore	New York	2010-10-31	-14.0	2571	2010-10-31 07:00:00
 Chicago	New York	2010-10-31	-25.0	361	2010-10-31 07:00:00
@@ -2124,16 +1828,12 @@ Chicago	New York	2010-10-31	-4.0	1531	2010-10-31 07:00:00
 Chicago	New York	2010-10-31	-22.0	1610	2010-10-31 07:00:00
 Chicago	New York	2010-10-31	-15.0	3198	2010-10-31 07:00:00
 Washington	New York	2010-10-31	-18.0	7282	2010-10-31 07:00:00
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2152,102 +1852,46 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc_partitioned_timestamp
                   Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: origin_city_name (type: string), dest_city_name (type: string), fl_date (type: date), arr_delay (type: float), fl_num (type: int), fl_time (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                     Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: _col4 (type: int), _col5 (type: timestamp)
                       sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: date), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 4, 5, 0, 1]
                 Statistics: Num rows: 137 Data size: 5480 Basic stats: COMPLETE Column stats: PARTIAL
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                   Reduce Output Operator
                     key expressions: _col4 (type: int), _col5 (type: timestamp)
                     sort order: ++
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                     Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: date), VALUE._col3 (type: float), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 3, 4, 5, 0, 1]
                 Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2317,16 +1961,12 @@ Chicago	New York	2010-10-24	113.0	897	2010-10-24 07:00:00
 Chicago	New York	2010-10-25	-1.0	897	2010-10-25 07:00:00
 Chicago	New York	2010-10-26	0.0	897	2010-10-26 07:00:00
 Chicago	New York	2010-10-27	-11.0	897	2010-10-27 07:00:00
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl_time
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2344,26 +1984,12 @@ STAGE PLANS:
                 TableScan
                   alias: flights_tiny_orc_partitioned_timestamp
                   Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: fl_time (type: timestamp)
                     outputColumnNames: fl_time
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [5]
                     Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 5
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: fl_time (type: timestamp)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -2372,50 +1998,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: timestamp)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true


<TRUNCATED>
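
A minimal HiveQL sketch of the behavior change these golden files capture. The
SET properties and the EXPLAIN VECTORIZATION syntax are taken from the plan
output above; the surrounding qtest session setup is an assumption, not part of
this commit:

    -- Assumed session setup (both properties appear in the enabledConditionsMet
    -- lines of the reverted output):
    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;

    -- With HIVE-11394 applied, EXPLAIN gains a VECTORIZATION mode that annotates
    -- each operator (TableScan Vectorization, Select Vectorization, ...):
    explain vectorization expression
    select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;

    -- After this revert, only the plain form remains, so the PLAN VECTORIZATION
    -- header and the per-operator vectorization sections disappear from the output:
    explain
    select * from flights_tiny_orc_partitioned_timestamp sort by fl_num, fl_time limit 25;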

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
index 85f858b..b137894 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
@@ -87,72 +87,25 @@ POSTHOOK: Lineage: table_add_int_permute_select.b SIMPLE [(values__tmp__table__1
 POSTHOOK: Lineage: table_add_int_permute_select.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_permute_select.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_permute_select
-                  Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=20)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=20)
+            default@table_add_int_permute_select,table_add_int_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_permute_select
@@ -259,72 +212,25 @@ POSTHOOK: Lineage: table_add_int_string_permute_select.c EXPRESSION [(values__tm
 POSTHOOK: Lineage: table_add_int_string_permute_select.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_string_permute_select.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_string_permute_select
-                  Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=20)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=20)
+            default@table_add_int_string_permute_select,table_add_int_string_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_string_permute_select
@@ -501,72 +407,25 @@ POSTHOOK: Lineage: table_change_string_group_double.c2 EXPRESSION [(values__tmp_
 POSTHOOK: Lineage: table_change_string_group_double.c3 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_string_group_double.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=52)
+          Output:["_col0","_col1","_col2","_col3","_col4"]
+          TableScan [TS_0] (rows=5 width=52)
+            default@table_change_string_group_double,table_change_string_group_double,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
@@ -836,72 +695,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_gro
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=151)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21"]
+          TableScan [TS_0] (rows=5 width=151)
+            default@table_change_numeric_group_string_group_multi_ints_string_group,table_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1056,72 +868,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=250)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
+          TableScan [TS_0] (rows=5 width=250)
+            default@table_change_numeric_group_string_group_floating_string_group,table_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
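
A hypothetical reconstruction of the schema-evolution pattern these tests
exercise, using the table and column names from the golden file above (the
actual DDL lives in schema_evol_text_vec_table.q, which this excerpt does not
include):

    -- Assumed setup; the column list matches the rowBatchContext dataColumns
    -- shown in the reverted output (insert_num:int, a:int, b:string, c:int):
    CREATE TABLE table_add_int_permute_select (insert_num int, a int, b string)
        STORED AS TEXTFILE;
    INSERT INTO table_add_int_permute_select VALUES (1, 1, 'original row');

    -- Evolve the schema, then read a permutation of the columns; rows written
    -- before the ALTER rely on NULL defaulting for the new column c:
    ALTER TABLE table_add_int_permute_select ADD COLUMNS (c int);
    select insert_num,a,b from table_add_int_permute_select;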

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
index b198354..98d6303 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
@@ -87,73 +87,25 @@ POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SIMPLE [(valu
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_permute_select
-                  Statistics: Num rows: 2 Data size: 33 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=16)
+            default@part_add_int_permute_select,part_add_int_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_permute_select
@@ -254,73 +206,25 @@ POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c EXPRES
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_string_permute_select
-                  Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=19)
+            default@part_add_int_string_permute_select,part_add_int_string_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_string_permute_select
@@ -483,73 +387,25 @@ POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	double1	double1	double1	_c4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 284 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=5 width=56)
+            default@part_change_string_group_double,part_change_string_group_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
@@ -649,73 +505,25 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_date_group_string_group_date_timestamp
-                  Statistics: Num rows: 6 Data size: 926 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), c6 (type: string), c7 (type: char(50)), c8 (type: char(15)), c9 (type: varchar(50)), c10 (type: varchar(15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:string, c2:char(50), c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), c8:char(15), c9:varchar(50), c10:varchar(15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=154)
+            default@part_change_date_group_string_group_date_timestamp,part_change_date_group_string_group_date_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
@@ -892,73 +700,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 6 Data size: 918 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22"]
+          TableScan [TS_0] (rows=6 width=153)
+            default@part_change_numeric_group_string_group_multi_ints_string_group,part_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1117,73 +877,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 6 Data size: 1386 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
+          TableScan [TS_0] (rows=6 width=231)
+            default@part_change_numeric_group_string_group_floating_string_group,part_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
@@ -1330,73 +1042,25 @@ POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_string_group_string
-                  Statistics: Num rows: 6 Data size: 421 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 (type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:char(50), c2:char(9), c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, c8:char(50), c9:char(9), c10:string, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=70)
+            default@part_change_string_group_string_group_string,part_change_string_group_string_group_string,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
@@ -1577,73 +1241,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint P
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint
-                  Statistics: Num rows: 6 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 (type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 (type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 20
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    dataColumns: insert_num:int, c1:smallint, c2:int, c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
+          TableScan [TS_0] (rows=6 width=143)
+            default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint,part_change_lower_to_higher_numeric_group_tinyint_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
@@ -1754,73 +1370,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PA
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_decimal_to_float
-                  Statistics: Num rows: 6 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:float, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=6 width=71)
+            default@part_change_lower_to_higher_numeric_group_decimal_to_float,part_change_lower_to_higher_numeric_group_decimal_to_float,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY
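
The pattern in this golden file is uniform: each case swaps the annotated
vectorization explain for the plain one, so the verbose STAGE PLANS output
collapses into the condensed CBO rendering. A minimal sketch of the two
commands whose output is being exchanged (any of the tables exercised in this
file would do):

  -- removed form: per-operator vectorization annotations
  explain vectorization detail
  select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;

  -- replacement form: plain explain, condensed plan
  explain
  select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;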

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_complex.q.out
index a331468..294a8f0 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_complex.q.out
@@ -155,55 +155,25 @@ POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).insert_num SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).s1 SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:s1, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct1_c_txt.insert_num	complex_struct1_c_txt.s1	complex_struct1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_struct1
-                  Statistics: Num rows: 6 Data size: 931 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), s1 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s1] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=6 width=155)
+            default@part_change_various_various_struct1,part_change_various_various_struct1,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","s1","b"]
 
 PREHOOK: query: select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
@@ -449,55 +419,25 @@ POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).b SIMPLE [
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).insert_num SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).s2 SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:s2, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct2_d_txt.insert_num	complex_struct2_d_txt.b	complex_struct2_d_txt.s2
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_various_various_struct2
-                  Statistics: Num rows: 8 Data size: 939 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s2 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s2] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=8 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=8 width=117)
+            default@part_add_various_various_struct2,part_add_various_various_struct2,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s2"]
 
 PREHOOK: query: select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
@@ -667,55 +607,25 @@ POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).insert_num SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).s3 SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:s3, type:struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>, comment:null), ]
 complex_struct4_c_txt.insert_num	complex_struct4_c_txt.b	complex_struct4_c_txt.s3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_to_various_various_struct4
-                  Statistics: Num rows: 4 Data size: 353 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s3 (type: struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary> of Column[s3] not supported
-                vectorized: false
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=4 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=4 width=88)
+            default@part_add_to_various_various_struct4,part_add_to_various_various_struct4,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s3"]
 
 PREHOOK: query: select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
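
Unlike the primitive-type cases above, these complex-type plans stay in row
mode: the vertex is reported as "Map 1 llap" rather than "Map 1 vectorized,
llap", because the vectorizer rejects struct columns (the removed
notVectorizedReason lines spell this out). A sketch of how to observe it,
assuming the tables created by this test:

  set hive.vectorized.execution.enabled=true;
  explain
  select insert_num,part,s1,b from part_change_various_various_struct1;
  -- the Map vertex shows "llap" only, without "vectorized"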


[57/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTSTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTSTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTSTransposeRule.java
index f81c21b..49e4bec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTSTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTSTransposeRule.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
+import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelOptUtil;
@@ -32,17 +33,28 @@ import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexOver;
 import org.apache.calcite.rex.RexUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 
 //TODO: Remove this once Calcite FilterProjectTransposeRule can take rule operand
 public class HiveFilterProjectTSTransposeRule extends RelOptRule {
 
+  public final static HiveFilterProjectTSTransposeRule INSTANCE =
+      new HiveFilterProjectTSTransposeRule(
+          Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
+          HiveRelFactories.HIVE_PROJECT_FACTORY, TableScan.class);
+
+  public final static HiveFilterProjectTSTransposeRule INSTANCE_DRUID =
+      new HiveFilterProjectTSTransposeRule(
+          Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
+          HiveRelFactories.HIVE_PROJECT_FACTORY, DruidQuery.class);
+
   private final RelFactories.FilterFactory  filterFactory;
   private final RelFactories.ProjectFactory projectFactory;
 
-  public HiveFilterProjectTSTransposeRule(Class<? extends Filter> filterClass,
+  private HiveFilterProjectTSTransposeRule(Class<? extends Filter> filterClass,
       FilterFactory filterFactory, Class<? extends Project> projectClass,
-      ProjectFactory projectFactory, Class<? extends TableScan> tsClass) {
+      ProjectFactory projectFactory, Class<? extends RelNode> tsClass) {
     super(operand(filterClass, operand(projectClass, operand(tsClass, none()))));
     this.filterFactory = filterFactory;
     this.projectFactory = projectFactory;
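
Since the constructor above becomes private, the two shared singletons are now
the only way to obtain the rule. A registration sketch (assumes a Calcite
RelOptPlanner in scope; not part of this patch):

  planner.addRule(HiveFilterProjectTSTransposeRule.INSTANCE);        // TableScan variant
  planner.addRule(HiveFilterProjectTSTransposeRule.INSTANCE_DRUID);  // DruidQuery variant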

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTransposeRule.java
index d43c2c6..91d674d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterProjectTransposeRule.java
@@ -27,8 +27,6 @@ import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories.FilterFactory;
-import org.apache.calcite.rel.core.RelFactories.ProjectFactory;
 import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rex.RexCall;
@@ -37,6 +35,7 @@ import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexOver;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
@@ -44,25 +43,25 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 public class HiveFilterProjectTransposeRule extends FilterProjectTransposeRule {
 
   public static final HiveFilterProjectTransposeRule INSTANCE_DETERMINISTIC_WINDOWING =
-          new HiveFilterProjectTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY,
-          HiveProject.class, HiveRelFactories.HIVE_PROJECT_FACTORY, true, true);
+          new HiveFilterProjectTransposeRule(Filter.class, HiveProject.class,
+                  HiveRelFactories.HIVE_BUILDER, true, true);
 
   public static final HiveFilterProjectTransposeRule INSTANCE_DETERMINISTIC =
-          new HiveFilterProjectTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY,
-          HiveProject.class, HiveRelFactories.HIVE_PROJECT_FACTORY, true, false);
+          new HiveFilterProjectTransposeRule(Filter.class, HiveProject.class,
+                  HiveRelFactories.HIVE_BUILDER, true, false);
 
   public static final HiveFilterProjectTransposeRule INSTANCE =
-          new HiveFilterProjectTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY,
-          HiveProject.class, HiveRelFactories.HIVE_PROJECT_FACTORY, false, false);
+          new HiveFilterProjectTransposeRule(Filter.class, HiveProject.class,
+                  HiveRelFactories.HIVE_BUILDER, false, false);
 
   private final boolean onlyDeterministic;
 
   private final boolean pushThroughWindowing;
 
   private HiveFilterProjectTransposeRule(Class<? extends Filter> filterClass,
-      FilterFactory filterFactory, Class<? extends Project> projectClass,
-      ProjectFactory projectFactory, boolean onlyDeterministic,boolean pushThroughWindowing) {
-    super(filterClass, filterFactory, projectClass, projectFactory);
+      Class<? extends Project> projectClass, RelBuilderFactory relBuilderFactory,
+      boolean onlyDeterministic, boolean pushThroughWindowing) {
+    super(filterClass, projectClass, false, false, relBuilderFactory);
     this.onlyDeterministic = onlyDeterministic;
     this.pushThroughWindowing = pushThroughWindowing;
   }
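
Same Calcite 1.10 migration as above: the FilterFactory/ProjectFactory pair
collapses into a single RelBuilderFactory passed to the superclass. For
reference, a factory pair can be adapted to the builder-based API with
Calcite's RelBuilder.proto (a sketch of the general mechanism, not code from
this patch):

  RelBuilderFactory factory = RelBuilder.proto(
      HiveRelFactories.HIVE_FILTER_FACTORY,
      HiveRelFactories.HIVE_PROJECT_FACTORY);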

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
index 7c2a7e5..dccb6a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
@@ -39,7 +39,6 @@ import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,9 +49,7 @@ import com.google.common.collect.Sets;
 
 public class HivePreFilteringRule extends RelOptRule {
 
-  protected static final Logger            LOG        = LoggerFactory
-                                                          .getLogger(HivePreFilteringRule.class
-                                                              .getName());
+  protected static final Logger LOG = LoggerFactory.getLogger(HivePreFilteringRule.class);
 
   private static final Set<SqlKind>        COMPARISON = EnumSet.of(SqlKind.EQUALS,
                                                           SqlKind.GREATER_THAN_OR_EQUAL,
@@ -209,7 +206,7 @@ public class HivePreFilteringRule extends RelOptRule {
     for (int i = 0; i < operands.size(); i++) {
       final RexNode operand = operands.get(i);
 
-      final RexNode operandCNF = HiveRexUtil.toCnf(rexBuilder, maxCNFNodeCount, operand);
+      final RexNode operandCNF = RexUtil.toCnf(rexBuilder, maxCNFNodeCount, operand);
       final List<RexNode> conjunctions = RelOptUtil.conjunctions(operandCNF);
 
       Set<String> refsInCurrentOperand = Sets.newHashSet();
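
Here the private HiveRexUtil.toCnf copy gives way to the bounded CNF
conversion that ships upstream in Calcite 1.10; the call shape is unchanged
apart from the class. Sketch (rexBuilder, maxCNFNodeCount and operand as in
the surrounding method):

  // caps the CNF expansion at maxCNFNodeCount nodes
  RexNode operandCNF = RexUtil.toCnf(rexBuilder, maxCNFNodeCount, operand);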

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
index 2fc68ae..a1b5aeb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -16,59 +16,28 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
 
-import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptPredicateList;
-import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
-import org.apache.calcite.rel.rules.ValuesReduceRule;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rel.rules.ReduceExpressionsRule;
 import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexCorrelVariable;
-import org.apache.calcite.rex.RexDynamicParam;
-import org.apache.calcite.rex.RexFieldAccess;
 import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexOver;
-import org.apache.calcite.rex.RexRangeRef;
-import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.fun.SqlRowOperator;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Stacks;
-import org.apache.calcite.util.Util;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil.ExprSimplifier;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 
 /**
@@ -82,51 +51,55 @@ import com.google.common.collect.Lists;
  * is the same as the type of the resulting cast expression
  * </ul>
  */
-public abstract class HiveReduceExpressionsRule extends RelOptRule {
+public abstract class HiveReduceExpressionsRule extends ReduceExpressionsRule {
 
   protected static final Logger LOG = LoggerFactory.getLogger(HiveReduceExpressionsRule.class);
 
   //~ Static fields/initializers ---------------------------------------------
 
   /**
-   * Regular expression that matches the description of all instances of this
-   * rule and {@link ValuesReduceRule} also. Use
-   * it to prevent the planner from invoking these rules.
-   */
-  public static final Pattern EXCLUSION_PATTERN =
-      Pattern.compile("Reduce(Expressions|Values)Rule.*");
-
-  /**
    * Singleton rule that reduces constants inside a
-   * {@link org.apache.calcite.rel.logical.HiveFilter}.
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter}.
    */
-  public static final HiveReduceExpressionsRule FILTER_INSTANCE =
-      new FilterReduceExpressionsRule(HiveFilter.class, HiveRelFactories.HIVE_BUILDER);
+  public static final ReduceExpressionsRule FILTER_INSTANCE =
+          new FilterReduceExpressionsRule(HiveFilter.class, HiveRelFactories.HIVE_BUILDER);
 
   /**
    * Singleton rule that reduces constants inside a
-   * {@link org.apache.calcite.rel.logical.HiveProject}.
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}.
    */
-  public static final HiveReduceExpressionsRule PROJECT_INSTANCE =
+  public static final ReduceExpressionsRule PROJECT_INSTANCE =
       new ProjectReduceExpressionsRule(HiveProject.class, HiveRelFactories.HIVE_BUILDER);
 
   /**
    * Singleton rule that reduces constants inside a
-   * {@link org.apache.calcite.rel.core.HiveJoin}.
+   * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin}.
    */
-  public static final HiveReduceExpressionsRule JOIN_INSTANCE =
+  public static final ReduceExpressionsRule JOIN_INSTANCE =
       new JoinReduceExpressionsRule(HiveJoin.class, HiveRelFactories.HIVE_BUILDER);
 
+  //~ Constructors -----------------------------------------------------------
+
+  /**
+   * Creates a HiveReduceExpressionsRule.
+   *
+   * @param clazz class of rels to which this rule should apply
+   */
+  protected HiveReduceExpressionsRule(Class<? extends RelNode> clazz,
+      RelBuilderFactory relBuilderFactory, String desc) {
+    super(clazz, relBuilderFactory, desc);
+  }
+
   /**
    * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Filter}.
    * If the condition is a constant, the filter is removed (if TRUE) or replaced with
    * an empty {@link org.apache.calcite.rel.core.Values} (if FALSE or NULL).
    */
-  public static class FilterReduceExpressionsRule extends HiveReduceExpressionsRule {
+  public static class FilterReduceExpressionsRule extends ReduceExpressionsRule {
 
     public FilterReduceExpressionsRule(Class<? extends Filter> filterClass,
         RelBuilderFactory relBuilderFactory) {
-      super(filterClass, relBuilderFactory, "HiveReduceExpressionsRule(Filter)");
+      super(filterClass, relBuilderFactory, "ReduceExpressionsRule(Filter)");
     }
 
     @Override public void onMatch(RelOptRuleCall call) {
@@ -135,8 +108,9 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
           Lists.newArrayList(filter.getCondition());
       RexNode newConditionExp;
       boolean reduced;
+      final RelMetadataQuery mq = RelMetadataQuery.instance();
       final RelOptPredicateList predicates =
-          RelMetadataQuery.instance().getPulledUpPredicates(filter.getInput());
+          mq.getPulledUpPredicates(filter.getInput());
       if (reduceExpressions(filter, expList, predicates, true)) {
         assert expList.size() == 1;
         newConditionExp = expList.get(0);
@@ -154,795 +128,95 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
       // predicate to see if it was already a constant,
       // in which case we don't need any runtime decision
       // about filtering.
-      // TODO: support LogicalValues
       if (newConditionExp.isAlwaysTrue()) {
         call.transformTo(
             filter.getInput());
       } else if (reduced) {
+        if (RexUtil.isNullabilityCast(filter.getCluster().getTypeFactory(),
+            newConditionExp)) {
+          newConditionExp = ((RexCall) newConditionExp).getOperands().get(0);
+        }
         call.transformTo(call.builder().
             push(filter.getInput()).filter(newConditionExp).build());
       } else {
+        if (newConditionExp instanceof RexCall) {
+          RexCall rexCall = (RexCall) newConditionExp;
+          boolean reverse = rexCall.getKind() == SqlKind.NOT;
+          if (reverse) {
+            if (!(rexCall.getOperands().get(0) instanceof RexCall)) {
+              // If child is not a RexCall instance, we can bail out
+              return;
+            }
+            rexCall = (RexCall) rexCall.getOperands().get(0);
+          }
+          reduceNotNullableFilter(call, filter, rexCall, reverse);
+        }
         return;
       }
 
       // New plan is absolutely better than old plan.
       call.getPlanner().setImportance(filter, 0.0);
     }
-  }
-
-  /**
-   * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.Project}.
-   */
-  public static class ProjectReduceExpressionsRule extends HiveReduceExpressionsRule {
-
-    public ProjectReduceExpressionsRule(Class<? extends Project> projectClass,
-        RelBuilderFactory relBuilderFactory) {
-      super(projectClass, relBuilderFactory, "HiveReduceExpressionsRule(Project)");
-    }
-
-    @Override public void onMatch(RelOptRuleCall call) {
-      Project project = call.rel(0);
-      final RelOptPredicateList predicates =
-          RelMetadataQuery.instance().getPulledUpPredicates(project.getInput());
-      final List<RexNode> expList =
-          Lists.newArrayList(project.getProjects());
-      if (reduceExpressions(project, expList, predicates)) {
-        RelNode newProject = call.builder().push(project.getInput())
-            .project(expList, project.getRowType().getFieldNames()).build();
-        call.transformTo(newProject);
 
-        // New plan is absolutely better than old plan.
-        call.getPlanner().setImportance(project, 0.0);
-      }
-    }
-  }
-
-  /**
-   * Rule that reduces constants inside a {@link org.apache.calcite.rel.core.HiveJoin}.
-   */
-  public static class JoinReduceExpressionsRule extends HiveReduceExpressionsRule {
-
-    public JoinReduceExpressionsRule(Class<? extends HiveJoin> joinClass,
-        RelBuilderFactory relBuilderFactory) {
-      super(joinClass, relBuilderFactory, "HiveReduceExpressionsRule(HiveJoin)");
-    }
-
-    @Override public void onMatch(RelOptRuleCall call) {
-      final HiveJoin join = call.rel(0);
-      final List<RexNode> expList = Lists.newArrayList(join.getCondition());
-      final int fieldCount = join.getLeft().getRowType().getFieldCount();
-      RelMetadataQuery mq = RelMetadataQuery.instance();
-      final RelOptPredicateList leftPredicates =
-          mq.getPulledUpPredicates(join.getLeft());
-      final RelOptPredicateList rightPredicates =
-          mq.getPulledUpPredicates(join.getRight());
-      final RelOptPredicateList predicates =
-          leftPredicates.union(rightPredicates.shift(fieldCount));
-      if (!reduceExpressions(join, expList, predicates, true)) {
-        return;
-      }
-      call.transformTo(
-          join.copy(
-              join.getTraitSet(),
-              expList.get(0),
-              join.getLeft(),
-              join.getRight(),
-              join.getJoinType(),
-              join.isSemiJoinDone()));
-
-      // New plan is absolutely better than old plan.
-      call.getPlanner().setImportance(join, 0.0);
-    }
-  }
-
-  //~ Constructors -----------------------------------------------------------
-
-  /**
-   * Creates a HiveReduceExpressionsRule.
-   *
-   * @param clazz class of rels to which this rule should apply
-   */
-  protected HiveReduceExpressionsRule(Class<? extends RelNode> clazz,
-      RelBuilderFactory relBuilderFactory, String desc) {
-    super(operand(clazz, any()), relBuilderFactory, desc);
-  }
-
-  //~ Methods ----------------------------------------------------------------
-
-  /**
-   * Reduces a list of expressions.
-   *
-   * @param rel     Relational expression
-   * @param expList List of expressions, modified in place
-   * @param predicates Constraints known to hold on input expressions
-   * @return whether reduction found something to change, and succeeded
-   */
-  protected static boolean reduceExpressions(RelNode rel, List<RexNode> expList,
-      RelOptPredicateList predicates) {
-    return reduceExpressions(rel, expList, predicates, false);
-  }
-
-  /**
-   * Reduces a list of expressions.
-   *
-   * @param rel     Relational expression
-   * @param expList List of expressions, modified in place
-   * @param predicates Constraints known to hold on input expressions
-   * @param unknownAsFalse Whether UNKNOWN will be treated as FALSE
-   *
-   * @return whether reduction found something to change, and succeeded
-   */
-  protected static boolean reduceExpressions(RelNode rel, List<RexNode> expList,
-      RelOptPredicateList predicates, boolean unknownAsFalse) {
-    RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
-
-    boolean reduced = reduceExpressionsInternal(rel, expList, predicates);
-
-    // Simplify preds in place
-    ExprSimplifier simplifier = new ExprSimplifier(rexBuilder, unknownAsFalse);
-    List<RexNode> expList2 = Lists.newArrayList(expList);
-    simplifier.mutate(expList2);
-    boolean simplified = false;
-    for (int i = 0; i < expList.size(); i++) {
-      if (!expList2.get(i).toString().equals(expList.get(i).toString())) {
-        expList.remove(i);
-        expList.add(i, expList2.get(i));
-        simplified = true;
-      }
-    }
-
-    return reduced || simplified;
-  }
-
-  protected static boolean reduceExpressionsInternal(RelNode rel, List<RexNode> expList,
-      RelOptPredicateList predicates) {
-    RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
-
-    // Replace predicates on CASE to CASE on predicates.
-    new CaseShuttle().mutate(expList);
-
-    // Find reducible expressions.
-    final List<RexNode> constExps = Lists.newArrayList();
-    List<Boolean> addCasts = Lists.newArrayList();
-    final List<RexNode> removableCasts = Lists.newArrayList();
-    final ImmutableMap<RexNode, RexNode> constants =
-        predicateConstants(RexNode.class, rexBuilder, predicates);
-    findReducibleExps(rel.getCluster().getTypeFactory(), expList, constants,
-        constExps, addCasts, removableCasts);
-    if (constExps.isEmpty() && removableCasts.isEmpty()) {
-      return false;
-    }
-
-    // Remove redundant casts before reducing constant expressions.
-    // If the argument to the redundant cast is a reducible constant,
-    // reducing that argument to a constant first will result in not being
-    // able to locate the original cast expression.
-    if (!removableCasts.isEmpty()) {
-      final List<RexNode> reducedExprs = Lists.newArrayList();
-      for (RexNode exp : removableCasts) {
-        RexCall call = (RexCall) exp;
-        reducedExprs.add(call.getOperands().get(0));
-      }
-      RexReplacer replacer =
-          new RexReplacer(
-              rexBuilder,
-              removableCasts,
-              reducedExprs,
-              Collections.nCopies(removableCasts.size(), false));
-      replacer.mutate(expList);
-    }
-
-    if (constExps.isEmpty()) {
-      return true;
-    }
-
-    final List<RexNode> constExps2 = Lists.newArrayList(constExps);
-    if (!constants.isEmpty()) {
-      //noinspection unchecked
-      final List<Map.Entry<RexNode, RexNode>> pairs =
-          (List<Map.Entry<RexNode, RexNode>>) (List)
-              Lists.newArrayList(constants.entrySet());
-      RexReplacer replacer =
-          new RexReplacer(
-              rexBuilder,
-              Pair.left(pairs),
-              Pair.right(pairs),
-              Collections.nCopies(pairs.size(), false));
-      replacer.mutate(constExps2);
-    }
-
-    // Compute the values they reduce to.
-    RelOptPlanner.Executor executor =
-        rel.getCluster().getPlanner().getExecutor();
-    if (executor == null) {
-      // Cannot reduce expressions: caller has not set an executor in their
-      // environment. Caller should execute something like the following before
-      // invoking the planner:
-      //
-      // final RexExecutorImpl executor =
-      //   new RexExecutorImpl(Schemas.createDataContext(null));
-      // rootRel.getCluster().getPlanner().setExecutor(executor);
-      return false;
-    }
-
-    final List<RexNode> reducedValues = Lists.newArrayList();
-    executor.reduce(rexBuilder, constExps2, reducedValues);
-
-    // Use RexNode.digest to judge whether each newly generated RexNode
-    // is equivalent to the original one.
-    if (Lists.transform(constExps, HiveCalciteUtil.REX_STR_FN).equals(
-            Lists.transform(reducedValues, HiveCalciteUtil.REX_STR_FN))) {
-      return false;
-    }
-
-    // For Project, we have to be sure to preserve the result
-    // types, so always cast regardless of the expression type.
-    // For other RelNodes like Filter, in general, this isn't necessary,
-    // and the presence of casts could hinder other rules such as sarg
-    // analysis, which require bare literals.  But there are exceptions,
-    // like when the expression is a UDR argument, that need to be
-    // handled specially.
-    if (rel instanceof Project) {
-      addCasts = Collections.nCopies(reducedValues.size(), true);
-    }
-
-    RexReplacer replacer =
-        new RexReplacer(
-            rexBuilder,
-            constExps,
-            reducedValues,
-            addCasts);
-    replacer.mutate(expList);
-    return true;
-  }
-
-  /**
-   * Locates expressions that can be reduced to literals or converted to
-   * expressions with redundant casts removed.
-   *
-   * @param typeFactory    Type factory
-   * @param exps           list of candidate expressions to be examined for
-   *                       reduction
-   * @param constants      List of expressions known to be constant
-   * @param constExps      returns the list of expressions that can be constant
-   *                       reduced
-   * @param addCasts       indicator for each expression that can be constant
-   *                       reduced, whether a cast of the resulting reduced
-   *                       expression is potentially necessary
-   * @param removableCasts returns the list of cast expressions where the cast
-   *                       can be removed
-   */
-  protected static void findReducibleExps(RelDataTypeFactory typeFactory,
-      List<RexNode> exps, ImmutableMap<RexNode, RexNode> constants,
-      List<RexNode> constExps, List<Boolean> addCasts,
-      List<RexNode> removableCasts) {
-    ReducibleExprLocator gardener =
-        new ReducibleExprLocator(typeFactory, constants, constExps,
-            addCasts, removableCasts);
-    for (RexNode exp : exps) {
-      gardener.analyze(exp);
-    }
-    assert constExps.size() == addCasts.size();
-  }
-
-  /** Creates a map containing each (e, constant) pair that occurs within
-   * a predicate list.
-   *
-   * @param clazz Class of expression that is considered constant
-   * @param rexBuilder Rex builder
-   * @param predicates Predicate list
-   * @param <C> what to consider a constant: {@link RexLiteral} to use a narrow
-   *           definition of constant, or {@link RexNode} to use
-   *           {@link RexUtil#isConstant(RexNode)}
-   * @return Map from values to constants
-   */
-  public static <C extends RexNode> ImmutableMap<RexNode, C> predicateConstants(
-          Class<C> clazz, RexBuilder rexBuilder, RelOptPredicateList predicates) {
-    // We cannot use an ImmutableMap.Builder here. If there are multiple entries
-    // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't
-    // matter which we take, so the latter will replace the former.
-    // The basic idea is to find all the pairs of RexNode = RexLiteral:
-    // (1) If a predicate is neither an EQUALS nor an IS NULL, its non-literal
-    // operands are added to the exclude set.
-    // (2) It is OK if a RexNode is equal to the same RexLiteral several times
-    // (e.g. "WHERE deptno = 1 AND deptno = 1").
-    // (3) Inconsistent constraints (e.g. "WHERE deptno = 1 AND deptno = 2")
-    // cause the conflicting expression to be dropped from the result map.
-    final Map<RexNode, C> map = new HashMap<>();
-    final Set<RexNode> excludeSet = new HashSet<>();
-    for (RexNode predicate : predicates.pulledUpPredicates) {
-      gatherConstraints(clazz, predicate, map, excludeSet, rexBuilder);
-    }
-    final ImmutableMap.Builder<RexNode, C> builder =
-        ImmutableMap.builder();
-    for (Map.Entry<RexNode, C> entry : map.entrySet()) {
-      RexNode rexNode = entry.getKey();
-      if (!overlap(rexNode, excludeSet)) {
-        builder.put(rexNode, entry.getValue());
-      }
-    }
-    return builder.build();
-  }
-
-  private static <C extends RexNode> void gatherConstraints(Class<C> clazz,
-      RexNode predicate, Map<RexNode, C> map, Set<RexNode> excludeSet,
-      RexBuilder rexBuilder) {
-    if (predicate.getKind() != SqlKind.EQUALS
-            && predicate.getKind() != SqlKind.IS_NULL) {
-      decompose(excludeSet, predicate);
-      return;
-    }
-    final List<RexNode> operands = ((RexCall) predicate).getOperands();
-    if (operands.size() != 2 && predicate.getKind() == SqlKind.EQUALS) {
-      decompose(excludeSet, predicate);
-      return;
-    }
-    // If we reach here, the predicate is either "rexNode = rexNode" or "rexNode IS NULL"
-    final RexNode left;
-    final RexNode right;
-    if (predicate.getKind() == SqlKind.EQUALS) {
-      left = operands.get(0);
-      right = operands.get(1);
-    } else {
-      left = operands.get(0);
-      right = rexBuilder.makeNullLiteral(left.getType().getSqlTypeName());
-    }
-    // note that literals are immutable too and they can only be compared through
-    // values.
-    gatherConstraint(clazz, left, right, map, excludeSet, rexBuilder);
-    gatherConstraint(clazz, right, left, map, excludeSet, rexBuilder);
-  }
-
-  /** Returns whether a value of {@code type2} can be assigned to a variable
-   * of {@code type1}.
-   *
-   * <p>For example:
-   * <ul>
-   *   <li>{@code canAssignFrom(BIGINT, TINYINT)} returns {@code true}</li>
-   *   <li>{@code canAssignFrom(TINYINT, BIGINT)} returns {@code false}</li>
-   *   <li>{@code canAssignFrom(BIGINT, VARCHAR)} returns {@code false}</li>
-   * </ul>
-   */
-  private static boolean canAssignFrom(RelDataType type1, RelDataType type2) {
-    final SqlTypeName name1 = type1.getSqlTypeName();
-    final SqlTypeName name2 = type2.getSqlTypeName();
-    if (name1.getFamily() == name2.getFamily()) {
-      switch (name1.getFamily()) {
-      case NUMERIC:
-        return name1.compareTo(name2) >= 0;
+    /**
+     * For static schema systems, a filter that is always false or null can be
+     * replaced by a values operator that produces no rows, as the schema
+     * information can just be taken from the input Rel. In dynamic schema
+     * environments, the filter might have an unknown input type; in these
+     * cases, the system must define a system-specific alternative to a Values
+     * operator, such as inserting a LIMIT 0 instead of a filter on top of the
+     * original input.
+     *
+     * <p>The default implementation of this method is to call
+     * {@link RelBuilder#empty}, which for the static schema will be optimized
+     * to an empty
+     * {@link org.apache.calcite.rel.core.Values}.
+     *
+     * @param input rel to replace; the caller is assumed to have already
+     *              determined that it is equivalent to a Values operation
+     *              producing 0 records, or to a false filter.
+     * @return equivalent but less expensive replacement rel
+     */
+    protected RelNode createEmptyRelOrEquivalent(RelOptRuleCall call, Filter input) {
+      return call.builder().push(input).empty().build();
+    }
+
+    private void reduceNotNullableFilter(
+        RelOptRuleCall call,
+        Filter filter,
+        RexCall rexCall,
+        boolean reverse) {
+      // If the expression is an IS [NOT] NULL on a non-nullable
+      // column, then we can either remove the filter or replace
+      // it with an Empty.
+      boolean alwaysTrue;
+      switch (rexCall.getKind()) {
+      case IS_NULL:
+      case IS_UNKNOWN:
+        alwaysTrue = false;
+        break;
+      case IS_NOT_NULL:
+        alwaysTrue = true;
+        break;
       default:
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private static <C extends RexNode> void gatherConstraint(Class<C> clazz,
-      RexNode left, RexNode right, Map<RexNode, C> map, Set<RexNode> excludeSet,
-      RexBuilder rexBuilder) {
-    if (!clazz.isInstance(right)) {
-      return;
-    }
-    if (!RexUtil.isConstant(right)) {
-      return;
-    }
-    C constant = clazz.cast(right);
-    if (excludeSet.contains(left)) {
-      return;
-    }
-    final C existedValue = map.get(left);
-    if (existedValue == null) {
-      switch (left.getKind()) {
-      case CAST:
-        // Convert "CAST(c) = literal" to "c = literal", as long as it is a
-        // widening cast.
-        final RexNode operand = ((RexCall) left).getOperands().get(0);
-        if (canAssignFrom(left.getType(), operand.getType())) {
-          final RexNode castRight =
-              rexBuilder.makeCast(operand.getType(), constant);
-          if (castRight instanceof RexLiteral) {
-            left = operand;
-            constant = clazz.cast(castRight);
-          }
-        }
-      }
-      map.put(left, constant);
-    } else {
-      if (existedValue instanceof RexLiteral
-          && constant instanceof RexLiteral
-          && !((RexLiteral) existedValue).getValue()
-              .equals(((RexLiteral) constant).getValue())) {
-        // we found conflicting values, e.g. left = 10 and left = 20
-        map.remove(left);
-        excludeSet.add(left);
-      }
-    }
-  }
-
-  private static boolean overlap(RexNode rexNode, Set<RexNode> set) {
-    if (rexNode instanceof RexCall) {
-      for (RexNode r : ((RexCall) rexNode).getOperands()) {
-        if (overlap(r, set)) {
-          return true;
-        }
-      }
-      return false;
-    } else {
-      return set.contains(rexNode);
-    }
-  }
-
-  /** Recursively collects the non-literal leaf RexNodes of a RexCall
-   * into the given set. */
-  private static void decompose(Set<RexNode> set, RexNode rexNode) {
-    if (rexNode instanceof RexCall) {
-      for (RexNode r : ((RexCall) rexNode).getOperands()) {
-        decompose(set, r);
-      }
-    } else if (!(rexNode instanceof RexLiteral)) {
-      set.add(rexNode);
-    }
-  }
-
-  /** Pushes predicates into a CASE.
-   *
-   * <p>We have a loose definition of 'predicate': any boolean expression will
-   * do, except CASE. For example '(CASE ...) = 5' or '(CASE ...) IS NULL'.
-   */
-  protected static RexCall pushPredicateIntoCase(RexCall call) {
-    if (call.getType().getSqlTypeName() != SqlTypeName.BOOLEAN) {
-      return call;
-    }
-    switch (call.getKind()) {
-    case CASE:
-    case AND:
-    case OR:
-      return call; // don't push CASE into CASE!
-    }
-    int caseOrdinal = -1;
-    final List<RexNode> operands = call.getOperands();
-    for (int i = 0; i < operands.size(); i++) {
-      RexNode operand = operands.get(i);
-      switch (operand.getKind()) {
-      case CASE:
-        caseOrdinal = i;
-      }
-    }
-    if (caseOrdinal < 0) {
-      return call;
-    }
-    // Convert
-    //   f(CASE WHEN p1 THEN v1 ... END, arg)
-    // to
-    //   CASE WHEN p1 THEN f(v1, arg) ... END
-    final RexCall case_ = (RexCall) operands.get(caseOrdinal);
-    final List<RexNode> nodes = new ArrayList<>();
-    for (int i = 0; i < case_.getOperands().size(); i++) {
-      RexNode node = case_.getOperands().get(i);
-      if (!RexUtil.isCasePredicate(case_, i)) {
-        node = substitute(call, caseOrdinal, node);
-      }
-      nodes.add(node);
-    }
-    return case_.clone(call.getType(), nodes);
-  }
-
-  /** Converts op(arg0, ..., argOrdinal, ..., argN) to op(arg0,..., node, ..., argN). */
-  protected static RexNode substitute(RexCall call, int ordinal, RexNode node) {
-    final List<RexNode> newOperands = Lists.newArrayList(call.getOperands());
-    newOperands.set(ordinal, node);
-    return call.clone(call.getType(), newOperands);
-  }
-
-  //~ Inner Classes ----------------------------------------------------------
-
-  /**
-   * Replaces expressions with their reductions. Note that we only have to
-   * look for RexCall, since nothing else is reducible in the first place.
-   */
-  protected static class RexReplacer extends RexShuttle {
-    private final RexBuilder rexBuilder;
-    private final List<RexNode> reducibleExps;
-    private final List<RexNode> reducedValues;
-    private final List<Boolean> addCasts;
-
-    RexReplacer(
-        RexBuilder rexBuilder,
-        List<RexNode> reducibleExps,
-        List<RexNode> reducedValues,
-        List<Boolean> addCasts) {
-      this.rexBuilder = rexBuilder;
-      this.reducibleExps = reducibleExps;
-      this.reducedValues = reducedValues;
-      this.addCasts = addCasts;
-    }
-
-    @Override public RexNode visitInputRef(RexInputRef inputRef) {
-      RexNode node = visit(inputRef);
-      if (node == null) {
-        return super.visitInputRef(inputRef);
-      }
-      return node;
-    }
-
-    @Override public RexNode visitCall(RexCall call) {
-      RexNode node = visit(call);
-      if (node != null) {
-        return node;
-      }
-      node = super.visitCall(call);
-      if (node != call) {
-        node = HiveRexUtil.simplify(rexBuilder, node);
-      }
-      return node;
-    }
-
-    private RexNode visit(final RexNode call) {
-      int i = reducibleExps.indexOf(call);
-      if (i == -1) {
-        return null;
-      }
-      RexNode replacement = reducedValues.get(i);
-      if (addCasts.get(i)
-          && (replacement.getType() != call.getType())) {
-        // Handle change from nullable to NOT NULL by claiming
-        // that the result is still nullable, even though
-        // we know it isn't.
-        //
-        // Also, we cannot reduce CAST('abc' AS VARCHAR(4)) to 'abc'.
-        // If we make 'abc' of type VARCHAR(4), we may later encounter
-        // the same expression in a Project's digest where it has
-        // type VARCHAR(3), and that's wrong.
-        replacement = rexBuilder.makeAbstractCast(call.getType(), replacement);
-      }
-      return replacement;
-    }
-  }
-
-  /**
-   * Helper class used to locate expressions that either can be reduced to
-   * literals or contain redundant casts.
-   */
-  protected static class ReducibleExprLocator extends RexVisitorImpl<Void> {
-    /** Whether an expression is constant, and if so, whether it can be
-     * reduced to a simpler constant. */
-    enum Constancy {
-      NON_CONSTANT, REDUCIBLE_CONSTANT, IRREDUCIBLE_CONSTANT
-    }
-
-    private final RelDataTypeFactory typeFactory;
-
-    private final List<Constancy> stack;
-
-    private final ImmutableMap<RexNode, RexNode> constants;
-
-    private final List<RexNode> constExprs;
-
-    private final List<Boolean> addCasts;
-
-    private final List<RexNode> removableCasts;
-
-    private final List<SqlOperator> parentCallTypeStack;
-
-    ReducibleExprLocator(RelDataTypeFactory typeFactory,
-        ImmutableMap<RexNode, RexNode> constants, List<RexNode> constExprs,
-        List<Boolean> addCasts, List<RexNode> removableCasts) {
-      // go deep
-      super(true);
-      this.typeFactory = typeFactory;
-      this.constants = constants;
-      this.constExprs = constExprs;
-      this.addCasts = addCasts;
-      this.removableCasts = removableCasts;
-      this.stack = Lists.newArrayList();
-      this.parentCallTypeStack = Lists.newArrayList();
-    }
-
-    public void analyze(RexNode exp) {
-      assert stack.isEmpty();
-
-      exp.accept(this);
-
-      // Deal with top of stack
-      assert stack.size() == 1;
-      assert parentCallTypeStack.isEmpty();
-      Constancy rootConstancy = stack.get(0);
-      if (rootConstancy == Constancy.REDUCIBLE_CONSTANT) {
-        // The entire subtree was constant, so add it to the result.
-        addResult(exp);
-      }
-      stack.clear();
-    }
-
-    private Void pushVariable() {
-      stack.add(Constancy.NON_CONSTANT);
-      return null;
-    }
-
-    private void addResult(RexNode exp) {
-      // Cast of literal can't be reduced, so skip those (otherwise we'd
-      // go into an infinite loop as we add them back).
-      if (exp.getKind() == SqlKind.CAST) {
-        RexCall cast = (RexCall) exp;
-        RexNode operand = cast.getOperands().get(0);
-        if (operand instanceof RexLiteral) {
-          return;
-        }
-      }
-      constExprs.add(exp);
-
-      // In the case where the expression corresponds to a UDR argument,
-      // we need to preserve casts.  Note that this only applies to
-      // the topmost argument, not expressions nested within the UDR
-      // call.
-      //
-      // REVIEW zfong 6/13/08 - Are there other expressions where we
-      // also need to preserve casts?
-      if (parentCallTypeStack.isEmpty()) {
-        addCasts.add(false);
-      } else {
-        addCasts.add(isUdf(Stacks.peek(parentCallTypeStack)));
-      }
-    }
-
-    private Boolean isUdf(SqlOperator operator) {
-      // return operator instanceof UserDefinedRoutine
-      return false;
-    }
-
-    public Void visitInputRef(RexInputRef inputRef) {
-      if (constants.containsKey(inputRef)) {
-        stack.add(Constancy.REDUCIBLE_CONSTANT);
-        return null;
-      }
-      return pushVariable();
-    }
-
-    public Void visitLiteral(RexLiteral literal) {
-      stack.add(Constancy.IRREDUCIBLE_CONSTANT);
-      return null;
-    }
-
-    public Void visitOver(RexOver over) {
-      // assume non-constant (running SUM(1) looks constant but isn't)
-      analyzeCall(over, Constancy.NON_CONSTANT);
-      return null;
-    }
-
-    public Void visitCorrelVariable(RexCorrelVariable correlVariable) {
-      return pushVariable();
-    }
-
-    public Void visitCall(RexCall call) {
-      // assume REDUCIBLE_CONSTANT until proven otherwise
-      analyzeCall(call, Constancy.REDUCIBLE_CONSTANT);
-      return null;
-    }
-
-    private void analyzeCall(RexCall call, Constancy callConstancy) {
-      Stacks.push(parentCallTypeStack, call.getOperator());
-
-      // visit operands, pushing their states onto stack
-      super.visitCall(call);
-
-      // look for NON_CONSTANT operands
-      int operandCount = call.getOperands().size();
-      List<Constancy> operandStack = Util.last(stack, operandCount);
-      for (Constancy operandConstancy : operandStack) {
-        if (operandConstancy == Constancy.NON_CONSTANT) {
-          callConstancy = Constancy.NON_CONSTANT;
-        }
-      }
-
-      // Even if all operands are constant, the call itself may
-      // be non-deterministic.
-      if (!call.getOperator().isDeterministic()) {
-        callConstancy = Constancy.NON_CONSTANT;
-      } else if (call.getOperator().isDynamicFunction()) {
-        // We can reduce the call to a constant, but we can't
-        // cache the plan if the function is dynamic.
-        // For now, treat it same as non-deterministic.
-        callConstancy = Constancy.NON_CONSTANT;
-      }
-
-      // Row operator itself can't be reduced to a literal, but if
-      // the operands are constants, we still want to reduce those
-      if ((callConstancy == Constancy.REDUCIBLE_CONSTANT)
-          && (call.getOperator() instanceof SqlRowOperator)) {
-        callConstancy = Constancy.NON_CONSTANT;
+        return;
       }
-
-      if (callConstancy == Constancy.NON_CONSTANT) {
-        // any REDUCIBLE_CONSTANT children are now known to be maximal
-        // reducible subtrees, so they can be added to the result
-        // list
-        for (int iOperand = 0; iOperand < operandCount; ++iOperand) {
-          Constancy constancy = operandStack.get(iOperand);
-          if (constancy == Constancy.REDUCIBLE_CONSTANT) {
-            addResult(call.getOperands().get(iOperand));
+      if (reverse) {
+        alwaysTrue = !alwaysTrue;
+      }
+      RexNode operand = rexCall.getOperands().get(0);
+      if (operand instanceof RexInputRef) {
+        RexInputRef inputRef = (RexInputRef) operand;
+        if (!inputRef.getType().isNullable()) {
+          if (alwaysTrue) {
+            call.transformTo(filter.getInput());
+          } else {
+            call.transformTo(createEmptyRelOrEquivalent(call, filter));
           }
         }
-
-        // if this cast expression can't be reduced to a literal,
-        // then see if we can remove the cast
-        if (call.getOperator() == SqlStdOperatorTable.CAST) {
-          reduceCasts(call);
-        }
       }
-
-      // pop operands off of the stack
-      operandStack.clear();
-
-      // pop this parent call operator off the stack
-      Stacks.pop(parentCallTypeStack, call.getOperator());
-
-      // push constancy result for this call onto stack
-      stack.add(callConstancy);
-    }
-
-    private void reduceCasts(RexCall outerCast) {
-      List<RexNode> operands = outerCast.getOperands();
-      if (operands.size() != 1) {
-        return;
-      }
-      RelDataType outerCastType = outerCast.getType();
-      RelDataType operandType = operands.get(0).getType();
-      if (operandType.equals(outerCastType)) {
-        removableCasts.add(outerCast);
-        return;
-      }
-
-      // See if the reduction
-      // CAST((CAST x AS type) AS type NOT NULL)
-      // -> CAST(x AS type NOT NULL)
-      // applies.  TODO jvs 15-Dec-2008:  consider
-      // similar cases for precision changes.
-      if (!(operands.get(0) instanceof RexCall)) {
-        return;
-      }
-      RexCall innerCast = (RexCall) operands.get(0);
-      if (innerCast.getOperator() != SqlStdOperatorTable.CAST) {
-        return;
-      }
-      if (innerCast.getOperands().size() != 1) {
-        return;
-      }
-      RelDataType outerTypeNullable =
-          typeFactory.createTypeWithNullability(outerCastType, true);
-      RelDataType innerTypeNullable =
-          typeFactory.createTypeWithNullability(operandType, true);
-      if (outerTypeNullable != innerTypeNullable) {
-        return;
-      }
-      if (operandType.isNullable()) {
-        removableCasts.add(innerCast);
-      }
-    }
-
-    public Void visitDynamicParam(RexDynamicParam dynamicParam) {
-      return pushVariable();
-    }
-
-    public Void visitRangeRef(RexRangeRef rangeRef) {
-      return pushVariable();
-    }
-
-    public Void visitFieldAccess(RexFieldAccess fieldAccess) {
-      return pushVariable();
     }
   }
 
-  /** Shuttle that pushes predicates into a CASE. */
-  protected static class CaseShuttle extends RexShuttle {
-    @Override public RexNode visitCall(RexCall call) {
-      for (;;) {
-        call = (RexCall) super.visitCall(call);
-        final RexCall old = call;
-        call = pushPredicateIntoCase(call);
-        if (call == old) {
-          return call;
-        }
-      }
-    }
-  }
 }
 
 // End HiveReduceExpressionsRule.java

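For context: the reduceNotNullableFilter logic retained above maps an
IS [NOT] NULL test over a non-nullable column to "always true" (drop the
filter) or "always false" (replace it with an empty rel); the "reverse"
flag accounts for an enclosing NOT(...). A minimal runnable sketch of that
decision table (class name hypothetical, not from the patch):

import org.apache.calcite.sql.SqlKind;

public final class NotNullFilterDecision {

  // Returns TRUE if the predicate is always true on a non-nullable column,
  // FALSE if it is always false, or null if the kind is not handled.
  static Boolean decide(SqlKind kind, boolean reverse) {
    Boolean alwaysTrue;
    switch (kind) {
    case IS_NULL:
    case IS_UNKNOWN:
      alwaysTrue = false;
      break;
    case IS_NOT_NULL:
      alwaysTrue = true;
      break;
    default:
      return null;
    }
    return reverse ? !alwaysTrue : alwaysTrue;
  }

  public static void main(String[] args) {
    // IS NOT NULL on a NOT NULL column: the filter is a no-op.
    System.out.println(decide(SqlKind.IS_NOT_NULL, false)); // true
    // NOT(IS NOT NULL(col)) on the same column: no row can satisfy it.
    System.out.println(decide(SqlKind.IS_NOT_NULL, true));  // false
  }
}

In the rule itself, true corresponds to call.transformTo(filter.getInput())
and false to createEmptyRelOrEquivalent(call, filter).
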
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
index ec488fe..0644f0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
@@ -38,7 +38,6 @@ import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.util.Pair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
@@ -131,7 +130,7 @@ public class HiveReduceExpressionsWithStatsRule extends RelOptRule {
             && call.operands.get(0) instanceof RexLiteral) {
           ref = (RexInputRef) call.operands.get(1);
           literal = (RexLiteral) call.operands.get(0);
-          kind = HiveRexUtil.invert(call.getOperator().getKind());
+          kind = call.getOperator().getKind().reverse();
         }
 
         // Found an expression that we can try to reduce
@@ -252,7 +251,7 @@ public class HiveReduceExpressionsWithStatsRule extends RelOptRule {
       // If we did not reduce, check the children nodes
       RexNode node = super.visitCall(call);
       if (node != call) {
-        node = HiveRexUtil.simplify(rexBuilder, node);
+        node = RexUtil.simplify(rexBuilder, node);
       }
       return node;
     }

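For context on the change above: SqlKind.reverse() returns the comparison
kind obtained when the two operands are swapped, which covers what
HiveRexUtil.invert was used for here. A small runnable illustration (class
name hypothetical, not from the patch):

import org.apache.calcite.sql.SqlKind;

public final class ReverseKindDemo {
  public static void main(String[] args) {
    // "literal < ref" is handled as "ref > literal", so the kind flips.
    System.out.println(SqlKind.LESS_THAN.reverse());    // GREATER_THAN
    System.out.println(SqlKind.GREATER_THAN.reverse()); // LESS_THAN
    // Symmetric kinds reverse to themselves.
    System.out.println(SqlKind.EQUALS.reverse());       // EQUALS
  }
}
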
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
index b0cb8df..458fee7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
+import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.LinkedHashSet;
@@ -24,28 +25,22 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.linq4j.Ord;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollations;
-import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.CorrelationId;
 import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.Sort;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexCorrelVariable;
 import org.apache.calcite.rex.RexFieldAccess;
-import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexPermuteInputsShuttle;
-import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.rex.RexVisitor;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql2rel.CorrelationReferenceFinder;
@@ -62,7 +57,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.parse.ColumnAccessInfo;
 
 import com.google.common.collect.ImmutableList;
@@ -72,11 +66,9 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
   protected static final Log LOG = LogFactory.getLog(HiveRelFieldTrimmer.class);
 
-  private RelBuilder relBuilder;
-
   private ColumnAccessInfo columnAccessInfo;
-
   private Map<HiveProject, Table> viewProjectToTableSchema;
+  private final RelBuilder relBuilder;
 
   public HiveRelFieldTrimmer(SqlValidator validator, RelBuilder relBuilder) {
     super(validator, relBuilder);
@@ -86,9 +78,9 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
   public HiveRelFieldTrimmer(SqlValidator validator, RelBuilder relBuilder,
       ColumnAccessInfo columnAccessInfo, Map<HiveProject, Table> viewToTableSchema) {
     super(validator, relBuilder);
-    this.relBuilder = relBuilder;
     this.columnAccessInfo = columnAccessInfo;
     this.viewProjectToTableSchema = viewToTableSchema;
+    this.relBuilder = relBuilder;
   }
 
   /**
@@ -193,186 +185,73 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
   /**
    * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
-   * {@link org.apache.calcite.rel.core.Sort}.
+   * {@link org.apache.calcite.adapter.druid.DruidQuery}.
    */
-  public TrimResult trimFields(
-      HiveSortLimit sort,
-      ImmutableBitSet fieldsUsed,
+  public TrimResult trimFields(DruidQuery dq, ImmutableBitSet fieldsUsed,
       Set<RelDataTypeField> extraFields) {
-    final RelDataType rowType = sort.getRowType();
-    final int fieldCount = rowType.getFieldCount();
-    final RelCollation collation = sort.getCollation();
-    final RelNode input = sort.getInput();
-    RelOptCluster cluster = sort.getCluster();
-
-    // We use the fields used by the consumer, plus any fields used as sort
-    // keys.
-    final ImmutableBitSet.Builder inputFieldsUsed =
-        ImmutableBitSet.builder(fieldsUsed);
-    for (RelFieldCollation field : collation.getFieldCollations()) {
-      inputFieldsUsed.set(field.getFieldIndex());
+    final int fieldCount = dq.getRowType().getFieldCount();
+    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount))
+        && extraFields.isEmpty()) {
+      // if all fields are used and no extra fields are needed, there is
+      // nothing to trim, so no need to introduce another RelNode
+      return trimFields(
+          (RelNode) dq, fieldsUsed, extraFields);
     }
-
-    // Create input with trimmed columns.
-    final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
-    TrimResult trimResult =
-        trimChild(sort, input, inputFieldsUsed.build(), inputExtraFields);
-    RelNode newInput = trimResult.left;
-    final Mapping inputMapping = trimResult.right;
-
-    // If the input is unchanged, and we need to project all columns,
-    // there's nothing we can do.
-    if (newInput == input
-        && inputMapping.isIdentity()
-        && fieldsUsed.cardinality() == fieldCount) {
-      return result(sort, Mappings.createIdentity(fieldCount));
+    final RelNode newTableAccessRel = project(dq, fieldsUsed, extraFields, relBuilder);
+
+    // Some parts of the system can't handle rows with zero fields, so
+    // pretend that one field is used.
+    if (fieldsUsed.cardinality() == 0) {
+      RelNode input = newTableAccessRel;
+      if (input instanceof Project) {
+        // The table has implemented the project in the obvious way - by
+        // creating a project with 0 fields. Strip it away, and create our own
+        // project with one field.
+        Project project = (Project) input;
+        if (project.getRowType().getFieldCount() == 0) {
+          input = project.getInput();
+        }
+      }
+      return dummyProject(fieldCount, input);
     }
 
-    relBuilder.push(newInput);
-    final int offset =
-        sort.offset == null ? 0 : RexLiteral.intValue(sort.offset);
-    final int fetch =
-        sort.fetch == null ? -1 : RexLiteral.intValue(sort.fetch);
-    final ImmutableList<RexNode> fields =
-        relBuilder.fields(RexUtil.apply(inputMapping, collation));
-
-    // The result has the same mapping as the input gave us. Sometimes we
-    // return fields that the consumer didn't ask for, because the sort
-    // needs them for its keys.
-    // TODO: Calcite will return empty LogicalValues when offset == 0 && fetch == 0.
-    // However, Hive ASTConverter can not deal with LogicalValues.
-    sortLimit(cluster, relBuilder, offset, fetch, fields);
-    return result(relBuilder.build(), inputMapping);
+    final Mapping mapping = createMapping(fieldsUsed, fieldCount);
+    return result(newTableAccessRel, mapping);
   }
-  
-  private List<RexNode> projects(RelDataType inputRowType, RelOptCluster cluster) {
-    final List<RexNode> exprList = new ArrayList<>();
-    for (RelDataTypeField field : inputRowType.getFieldList()) {
-      final RexBuilder rexBuilder = cluster.getRexBuilder();
-      exprList.add(rexBuilder.makeInputRef(field.getType(), field.getIndex()));
+
+  private static RelNode project(DruidQuery dq, ImmutableBitSet fieldsUsed,
+          Set<RelDataTypeField> extraFields, RelBuilder relBuilder) {
+    final int fieldCount = dq.getRowType().getFieldCount();
+    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount))
+        && extraFields.isEmpty()) {
+      return dq;
     }
-    return exprList;
-  }
-  
-  private static RelFieldCollation collation(RexNode node,
-      RelFieldCollation.Direction direction,
-      RelFieldCollation.NullDirection nullDirection, List<RexNode> extraNodes) {
-    switch (node.getKind()) {
-    case INPUT_REF:
-      return new RelFieldCollation(((RexInputRef) node).getIndex(), direction,
-          Util.first(nullDirection, direction.defaultNullDirection()));
-    case DESCENDING:
-      return collation(((RexCall) node).getOperands().get(0),
-          RelFieldCollation.Direction.DESCENDING,
-          nullDirection, extraNodes);
-    case NULLS_FIRST:
-      return collation(((RexCall) node).getOperands().get(0), direction,
-          RelFieldCollation.NullDirection.FIRST, extraNodes);
-    case NULLS_LAST:
-      return collation(((RexCall) node).getOperands().get(0), direction,
-          RelFieldCollation.NullDirection.LAST, extraNodes);
-    default:
-      final int fieldIndex = extraNodes.size();
-      extraNodes.add(node);
-      return new RelFieldCollation(fieldIndex, direction,
-          Util.first(nullDirection, direction.defaultNullDirection()));
+    final List<RexNode> exprList = new ArrayList<>();
+    final List<String> nameList = new ArrayList<>();
+    final RexBuilder rexBuilder = dq.getCluster().getRexBuilder();
+    final List<RelDataTypeField> fields = dq.getRowType().getFieldList();
+
+    // Project the subset of fields.
+    for (int i : fieldsUsed) {
+      RelDataTypeField field = fields.get(i);
+      exprList.add(rexBuilder.makeInputRef(dq, i));
+      nameList.add(field.getName());
     }
-  }
-  
- private void sortLimit(RelOptCluster cluster, RelBuilder relBuilder, int offset, int fetch,
-     Iterable<? extends RexNode> nodes) {
-   final List<RelFieldCollation> fieldCollations = new ArrayList<>();
-   final RelDataType inputRowType = relBuilder.peek().getRowType();
-   final List<RexNode> extraNodes = projects(inputRowType, cluster);
-   final List<RexNode> originalExtraNodes = ImmutableList.copyOf(extraNodes);
-   for (RexNode node : nodes) {
-     fieldCollations.add(
-         collation(node, RelFieldCollation.Direction.ASCENDING,
-                 RelFieldCollation.NullDirection.FIRST, extraNodes));
-   }
-   final RexNode offsetNode = offset <= 0 ? null : relBuilder.literal(offset);
-   final RexNode fetchNode = fetch < 0 ? null : relBuilder.literal(fetch);
-   if (offsetNode == null && fetchNode == null && fieldCollations.isEmpty()) {
-     return; // sort is trivial
-   }
 
-   final boolean addedFields = extraNodes.size() > originalExtraNodes.size();
-   if (fieldCollations.isEmpty()) {
-     assert !addedFields;
-     RelNode top = relBuilder.peek();
-     if (top instanceof Sort) {
-       final Sort sort2 = (Sort) top;
-       if (sort2.offset == null && sort2.fetch == null) {
-         relBuilder.build();
-         relBuilder.push(sort2.getInput());
-         final RelNode sort =
-             HiveSortLimit.create(relBuilder.build(), sort2.collation,
-                 offsetNode, fetchNode);
-         relBuilder.push(sort);
-         return;
-       }
-     }
-     if (top instanceof Project) {
-       final Project project = (Project) top;
-       if (project.getInput() instanceof Sort) {
-         final Sort sort2 = (Sort) project.getInput();
-         if (sort2.offset == null && sort2.fetch == null) {
-           relBuilder.build();
-           relBuilder.push(sort2.getInput());
-           final RelNode sort =
-               HiveSortLimit.create(relBuilder.build(), sort2.collation,
-                   offsetNode, fetchNode);
-           relBuilder.push(sort);
-           relBuilder.project(project.getProjects());
-           return;
-         }
-       }
-     }
-   }
-   if (addedFields) {
-     relBuilder.project(extraNodes);
-   }
-   final RelNode sort =
-       HiveSortLimit.create(relBuilder.build(), RelCollations.of(fieldCollations),
-           offsetNode, fetchNode);
-   relBuilder.push(sort);
-   if (addedFields) {
-     relBuilder.project(originalExtraNodes);
-   }
-   return;
- }
- 
-  private TrimResult result(RelNode r, final Mapping mapping) {
-    final RexBuilder rexBuilder = relBuilder.getRexBuilder();
-    for (final CorrelationId correlation : r.getVariablesSet()) {
-      r = r.accept(
-          new CorrelationReferenceFinder() {
-            @Override
-            protected RexNode handle(RexFieldAccess fieldAccess) {
-              final RexCorrelVariable v =
-                  (RexCorrelVariable) fieldAccess.getReferenceExpr();
-              if (v.id.equals(correlation)
-                  && v.getType().getFieldCount() == mapping.getSourceCount()) {
-                final int old = fieldAccess.getField().getIndex();
-                final int new_ = mapping.getTarget(old);
-                final RelDataTypeFactory.FieldInfoBuilder typeBuilder =
-                    relBuilder.getTypeFactory().builder();
-                for (int target : Util.range(mapping.getTargetCount())) {
-                  typeBuilder.add(
-                      v.getType().getFieldList().get(mapping.getSource(target)));
-                }
-                final RexNode newV =
-                    rexBuilder.makeCorrel(typeBuilder.build(), v.id);
-                if (old != new_) {
-                  return rexBuilder.makeFieldAccess(newV, new_);
-                }
-              }
-              return fieldAccess;
-            }
-
-          });
+    // Project nulls for the extra fields. (Maybe a sub-class table has
+    // extra fields, but we don't.)
+    for (RelDataTypeField extraField : extraFields) {
+      exprList.add(
+          rexBuilder.ensureType(
+              extraField.getType(),
+              rexBuilder.constantNull(),
+              true));
+      nameList.add(extraField.getName());
     }
-    return new TrimResult(r, mapping);
+
+    HiveProject hp = (HiveProject) relBuilder.push(dq).project(exprList, nameList).build();
+    hp.setSynthetic();
+    return hp;
   }
 
   /**

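For context on the DruidQuery variant of trimFields above: each used input
field is re-projected at a dense output position, and the trimmer records
the resulting input-to-output mapping. A standalone illustration with
hypothetical values (not from the patch):

import org.apache.calcite.util.ImmutableBitSet;

public final class TrimMappingDemo {
  public static void main(String[] args) {
    // Suppose a 5-column row type of which only fields 1 and 3 are used.
    ImmutableBitSet fieldsUsed = ImmutableBitSet.of(1, 3);
    int target = 0;
    for (int source : fieldsUsed) {
      // The new project keeps input field $source at dense position target.
      System.out.println("$" + source + " -> output position " + target++);
    }
    // Prints: $1 -> output position 0, $3 -> output position 1
  }
}
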
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
index 18fe650..b86b947 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdCollation.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.metadata.BuiltInMetadata;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.MetadataDef;
+import org.apache.calcite.rel.metadata.MetadataHandler;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMdCollation;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
@@ -32,7 +35,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 
 import com.google.common.collect.ImmutableList;
 
-public class HiveRelMdCollation {
+public class HiveRelMdCollation implements MetadataHandler<BuiltInMetadata.Collation> {
 
   public static final RelMetadataProvider SOURCE =
           ChainedRelMetadataProvider.of(
@@ -47,6 +50,11 @@ public class HiveRelMdCollation {
 
   //~ Methods ----------------------------------------------------------------
 
+  @Override
+  public MetadataDef<BuiltInMetadata.Collation> getDef() {
+    return BuiltInMetadata.Collation.DEF;
+  }
+
   public ImmutableList<RelCollation> collations(HiveAggregate aggregate, RelMetadataQuery mq) {
     // Compute collations
     ImmutableList.Builder<RelFieldCollation> collationListBuilder =

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
index 62d3ead..e574e7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistribution.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 
 import org.apache.calcite.rel.RelDistribution;
+import org.apache.calcite.rel.metadata.BuiltInMetadata;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.MetadataDef;
+import org.apache.calcite.rel.metadata.MetadataHandler;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
-import org.apache.calcite.rel.metadata.RelMdDistribution;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.util.BuiltInMethod;
@@ -30,7 +32,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 
 import com.google.common.collect.ImmutableList;
 
-public class HiveRelMdDistribution {
+public class HiveRelMdDistribution implements MetadataHandler<BuiltInMetadata.Distribution> {
 
   public static final RelMetadataProvider SOURCE =
           ChainedRelMetadataProvider.of(
@@ -44,6 +46,10 @@ public class HiveRelMdDistribution {
 
   //~ Methods ----------------------------------------------------------------
 
+  public MetadataDef<BuiltInMetadata.Distribution> getDef() {
+    return BuiltInMetadata.Distribution.DEF;
+  }
+
   public RelDistribution distribution(HiveAggregate aggregate, RelMetadataQuery mq) {
     return new HiveRelDistribution(RelDistribution.Type.HASH_DISTRIBUTED,
             aggregate.getGroupSet().asList());

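For readers unfamiliar with the newer Calcite metadata API adopted above:
each provider class now implements MetadataHandler and advertises the
metadata it serves via getDef(), while the per-RelNode methods are still
discovered reflectively. A minimal sketch of the pattern (handler class and
method body are hypothetical, not from the patch):

import org.apache.calcite.rel.RelDistribution;
import org.apache.calcite.rel.RelDistributions;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.BuiltInMetadata;
import org.apache.calcite.rel.metadata.MetadataDef;
import org.apache.calcite.rel.metadata.MetadataHandler;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

public class ExampleDistributionHandler
    implements MetadataHandler<BuiltInMetadata.Distribution> {

  @Override
  public MetadataDef<BuiltInMetadata.Distribution> getDef() {
    return BuiltInMetadata.Distribution.DEF;
  }

  // Methods named "distribution(<RelNode subtype>, RelMetadataQuery)" are
  // matched reflectively by ReflectiveRelMetadataProvider.
  public RelDistribution distribution(RelNode rel, RelMetadataQuery mq) {
    return RelDistributions.ANY;
  }
}
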
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index e468573..69e157e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -40,6 +40,10 @@ import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.SemiJoin;
 import org.apache.calcite.rel.core.Union;
+import org.apache.calcite.rel.metadata.BuiltInMetadata;
+import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.MetadataDef;
+import org.apache.calcite.rel.metadata.MetadataHandler;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMdPredicates;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
@@ -72,14 +76,28 @@ import com.google.common.collect.Maps;
 
 
 //TODO: Move this to calcite
-public class HiveRelMdPredicates extends RelMdPredicates {
+public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Predicates> {
 
-  public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource(
-                                                     BuiltInMethod.PREDICATES.method,
-                                                     new HiveRelMdPredicates());
+  public static final RelMetadataProvider SOURCE =
+          ChainedRelMetadataProvider.of(
+                  ImmutableList.of(
+                          ReflectiveRelMetadataProvider.reflectiveSource(
+                                  BuiltInMethod.PREDICATES.method, new HiveRelMdPredicates()),
+                          RelMdPredicates.SOURCE));
 
   private static final List<RexNode> EMPTY_LIST = ImmutableList.of();
 
+  //~ Constructors -----------------------------------------------------------
+
+  private HiveRelMdPredicates() {}
+
+  //~ Methods ----------------------------------------------------------------
+
+  @Override
+  public MetadataDef<BuiltInMetadata.Predicates> getDef() {
+    return BuiltInMetadata.Predicates.DEF;
+  }
+
   /**
    * Infers predicates for a project.
    *
@@ -99,8 +117,8 @@ public class HiveRelMdPredicates extends RelMdPredicates {
    *
    * </ol>
    */
-  @Override
   public RelOptPredicateList getPredicates(Project project, RelMetadataQuery mq) {
+
     RelNode child = project.getInput();
     final RexBuilder rexBuilder = project.getCluster().getRexBuilder();
     RelOptPredicateList childInfo = mq.getPulledUpPredicates(child);
@@ -151,7 +169,6 @@ public class HiveRelMdPredicates extends RelMdPredicates {
   }
 
   /** Infers predicates for a {@link org.apache.calcite.rel.core.Join}. */
-  @Override
   public RelOptPredicateList getPredicates(Join join, RelMetadataQuery mq) {
     RexBuilder rB = join.getCluster().getRexBuilder();
     RelNode left = join.getInput(0);
@@ -181,7 +198,6 @@ public class HiveRelMdPredicates extends RelMdPredicates {
    * pulledUpExprs    : { a &gt; 7}
    * </pre>
    */
-  @Override
   public RelOptPredicateList getPredicates(Aggregate agg, RelMetadataQuery mq) {
     final RelNode input = agg.getInput();
     final RelOptPredicateList inputInfo = mq.getPulledUpPredicates(input);
@@ -209,7 +225,6 @@ public class HiveRelMdPredicates extends RelMdPredicates {
   /**
    * Infers predicates for a Union.
    */
-  @Override
   public RelOptPredicateList getPredicates(Union union, RelMetadataQuery mq) {
     RexBuilder rB = union.getCluster().getRexBuilder();
 

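For context on the SOURCE change above: since HiveRelMdPredicates no longer
extends RelMdPredicates, default behavior is preserved by chaining
providers; providers are consulted in list order, so the Hive handler wins
where it applies and RelMdPredicates.SOURCE acts as the fallback. A sketch
of the composition (class and parameter names hypothetical, not from the
patch):

import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
import org.apache.calcite.rel.metadata.MetadataHandler;
import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMdPredicates;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.util.BuiltInMethod;

import com.google.common.collect.ImmutableList;

public final class ChainedProviderSketch {
  // "handler" stands in for the Hive-specific predicates handler.
  static RelMetadataProvider chain(MetadataHandler<?> handler) {
    return ChainedRelMetadataProvider.of(
        ImmutableList.of(
            // consulted first: Hive-specific predicate inference
            ReflectiveRelMetadataProvider.reflectiveSource(
                BuiltInMethod.PREDICATES.method, handler),
            // consulted second: Calcite's stock implementation
            RelMdPredicates.SOURCE));
  }
}
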
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
index 0d03ebb..651adc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
@@ -41,13 +41,16 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import com.google.common.collect.ImmutableMap;
 
 public class HiveRelMdSelectivity extends RelMdSelectivity {
-  public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource(
-                                                     BuiltInMethod.SELECTIVITY.method,
-                                                     new HiveRelMdSelectivity());
 
-  protected HiveRelMdSelectivity() {
-    super();
-  }
+  public static final RelMetadataProvider SOURCE =
+      ReflectiveRelMetadataProvider.reflectiveSource(
+          BuiltInMethod.SELECTIVITY.method, new HiveRelMdSelectivity());
+
+  //~ Constructors -----------------------------------------------------------
+
+  private HiveRelMdSelectivity() {}
+
+  //~ Methods ----------------------------------------------------------------
 
   public Double getSelectivity(HiveTableScan t, RelMetadataQuery mq, RexNode predicate) {
     if (predicate != null) {
@@ -58,7 +61,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
     return 1.0;
   }
 
-  public Double getSelectivity(HiveJoin j, RelMetadataQuery mq, RexNode predicate) throws CalciteSemanticException {
+  public Double getSelectivity(HiveJoin j, RelMetadataQuery mq, RexNode predicate) {
     if (j.getJoinType().equals(JoinRelType.INNER)) {
       return computeInnerJoinSelectivity(j, mq, predicate);
     } else if (j.getJoinType().equals(JoinRelType.LEFT) ||
@@ -75,7 +78,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
     return 1.0;
   }
 
-  private Double computeInnerJoinSelectivity(HiveJoin j, RelMetadataQuery mq, RexNode predicate) throws CalciteSemanticException {
+  private Double computeInnerJoinSelectivity(HiveJoin j, RelMetadataQuery mq, RexNode predicate) {
     double ndvCrossProduct = 1;
     Pair<Boolean, RexNode> predInfo =
         getCombinedPredicateForJoin(j, predicate);
@@ -86,8 +89,13 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
     }
 
     RexNode combinedPredicate = predInfo.getValue();
-    JoinPredicateInfo jpi = JoinPredicateInfo.constructJoinPredicateInfo(j,
-        combinedPredicate);
+    JoinPredicateInfo jpi;
+    try {
+      jpi = JoinPredicateInfo.constructJoinPredicateInfo(j,
+          combinedPredicate);
+    } catch (CalciteSemanticException e) {
+      throw new RuntimeException(e);
+    }
     ImmutableMap.Builder<Integer, Double> colStatMapBuilder = ImmutableMap
         .builder();
     ImmutableMap<Integer, Double> colStatMap;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
index 31adb41..1039f56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSize.java
@@ -122,7 +122,18 @@ public class HiveRelMdSize extends RelMdSize {
     case BIGINT:
     case DOUBLE:
     case TIMESTAMP:
-    case INTERVAL_DAY_TIME:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_MONTH:
+    case INTERVAL_SECOND:
+    case INTERVAL_YEAR:
     case INTERVAL_YEAR_MONTH:
       return 8d;
     case BINARY:

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
index 0718150..9a5a2ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
@@ -30,9 +30,9 @@ import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.metadata.BuiltInMetadata;
-import org.apache.calcite.rel.metadata.Metadata;
+import org.apache.calcite.rel.metadata.MetadataDef;
+import org.apache.calcite.rel.metadata.MetadataHandler;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
-import org.apache.calcite.rel.metadata.RelMdUniqueKeys;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rex.RexInputRef;
@@ -43,13 +43,16 @@ import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 
-import com.google.common.base.Function;
+public class HiveRelMdUniqueKeys implements MetadataHandler<BuiltInMetadata.UniqueKeys> {
 
-public class HiveRelMdUniqueKeys {
+  public static final RelMetadataProvider SOURCE =
+      ReflectiveRelMetadataProvider.reflectiveSource(
+          BuiltInMethod.UNIQUE_KEYS.method, new HiveRelMdUniqueKeys());
 
-  public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider
-      .reflectiveSource(BuiltInMethod.UNIQUE_KEYS.method,
-          new HiveRelMdUniqueKeys());
+  @Override
+  public MetadataDef<BuiltInMetadata.UniqueKeys> getDef() {
+    return BuiltInMetadata.UniqueKeys.DEF;
+  }
 
   /*
   * Infer Uniqueness if: - rowCount(col) = ndv(col) - TBD for numerics: max(col)
@@ -65,7 +68,60 @@ public class HiveRelMdUniqueKeys {
     HiveTableScan tScan = getTableScan(rel.getInput(), false);
 
     if (tScan == null) {
-      return mq.getUniqueKeys(rel, ignoreNulls);
+      // If a HiveTableScan is not found, e.g., the input is not a sequence of
+      // Project and Filter operators, apply the original getUniqueKeys logic inlined below.
+
+      // LogicalProject maps a set of rows to a different set;
+      // Without knowledge of the mapping function(whether it
+      // preserves uniqueness), it is only safe to derive uniqueness
+      // info from the child of a project when the mapping is f(a) => a.
+      //
+      // Furthermore, the unique bitset coming from the child needs
+      // to be mapped to match the output of the project.
+      final Map<Integer, Integer> mapInToOutPos = new HashMap<>();
+      final List<RexNode> projExprs = rel.getProjects();
+      final Set<ImmutableBitSet> projUniqueKeySet = new HashSet<>();
+
+      // Build an input to output position map.
+      for (int i = 0; i < projExprs.size(); i++) {
+        RexNode projExpr = projExprs.get(i);
+        if (projExpr instanceof RexInputRef) {
+          mapInToOutPos.put(((RexInputRef) projExpr).getIndex(), i);
+        }
+      }
+
+      if (mapInToOutPos.isEmpty()) {
+        // If there is no RexInputRef in the projected expressions,
+        // return an empty set.
+        return projUniqueKeySet;
+      }
+
+      Set<ImmutableBitSet> childUniqueKeySet =
+          mq.getUniqueKeys(rel.getInput(), ignoreNulls);
+
+      if (childUniqueKeySet != null) {
+        // Now add to the projUniqueKeySet the child keys that are fully
+        // projected.
+        for (ImmutableBitSet colMask : childUniqueKeySet) {
+          ImmutableBitSet.Builder tmpMask = ImmutableBitSet.builder();
+          boolean completeKeyProjected = true;
+          for (int bit : colMask) {
+            if (mapInToOutPos.containsKey(bit)) {
+              tmpMask.set(mapInToOutPos.get(bit));
+            } else {
+              // Skip the child unique key if part of it is not
+              // projected.
+              completeKeyProjected = false;
+              break;
+            }
+          }
+          if (completeKeyProjected) {
+            projUniqueKeySet.add(tmpMask.build());
+          }
+        }
+      }
+
+      return projUniqueKeySet;
     }
 
     Map<Integer, Integer> posMap = new HashMap<Integer, Integer>();
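
Because HiveRelMdUniqueKeys now implements MetadataHandler directly rather than
delegating to Calcite's RelMdUniqueKeys, the Project handling is inlined above:
a child unique key survives the Project only if every one of its columns is
re-emitted as a bare RexInputRef. A stripped-down sketch of that mapping step
using plain java.util collections (illustrative; the patch works with Calcite's
ImmutableBitSet):

    import java.util.BitSet;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    final class ProjectKeyMapper {

      /**
       * Maps each child unique key through the projection's input-to-output
       * position map, keeping only the keys whose columns are all projected.
       */
      static Set<BitSet> mapKeys(Set<BitSet> childKeys, Map<Integer, Integer> inToOut) {
        Set<BitSet> mappedKeys = new HashSet<>();
        for (BitSet key : childKeys) {
          BitSet mapped = new BitSet();
          boolean completeKeyProjected = true;
          for (int bit = key.nextSetBit(0); bit >= 0; bit = key.nextSetBit(bit + 1)) {
            Integer outPos = inToOut.get(bit);
            if (outPos == null) {
              // Part of the key is dropped by the Project, so uniqueness
              // cannot be asserted on the output side.
              completeKeyProjected = false;
              break;
            }
            mapped.set(outPos);
          }
          if (completeKeyProjected) {
            mappedKeys.add(mapped);
          }
        }
        return mappedKeys;
      }
    }

For example, if the child reports {0, 2} as unique and the Project emits input 0
at output position 1 and input 2 at output position 0, the mapped key is {0, 1};
if input 2 were not projected at all, the whole key would be discarded.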

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index 9a5becb..7b9ee84 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -22,15 +22,15 @@ import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 
+import org.apache.calcite.adapter.druid.DruidQuery;
+import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidQuery;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -58,19 +58,20 @@ class ASTBuilder {
                 "TOK_TMP_FILE")).node();
   }
 
-  static ASTNode table(TableScan scan) {
-    RelOptHiveTable hTbl = (RelOptHiveTable) scan.getTable();
-    ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add(
-        ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
-            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
-            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName()));
-
+  static ASTNode table(RelNode scan) {
     HiveTableScan hts;
     if (scan instanceof DruidQuery) {
       hts = (HiveTableScan) ((DruidQuery)scan).getTableScan();
     } else {
       hts = (HiveTableScan) scan;
     }
+
+    RelOptHiveTable hTbl = (RelOptHiveTable) hts.getTable();
+    ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add(
+        ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
+            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
+            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName()));
+
     ASTBuilder propList = ASTBuilder.construct(HiveParser.TOK_TABLEPROPLIST, "TOK_TABLEPROPLIST");
     if (scan instanceof DruidQuery) {
       // Pass possible query to Druid
@@ -181,8 +182,19 @@ class ASTBuilder {
     case DATE:
     case TIME:
     case TIMESTAMP:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_MONTH:
+    case INTERVAL_SECOND:
+    case INTERVAL_YEAR:
     case INTERVAL_YEAR_MONTH:
-    case INTERVAL_DAY_TIME:
       if (literal.getValue() == null) {
         return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
       }
@@ -273,14 +285,25 @@ class ASTBuilder {
       val = "'" + val + "'";
     }
       break;
+    case INTERVAL_YEAR:
+    case INTERVAL_MONTH:
     case INTERVAL_YEAR_MONTH: {
       type = HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL;
       BigDecimal monthsBd = (BigDecimal) literal.getValue();
       HiveIntervalYearMonth intervalYearMonth = new HiveIntervalYearMonth(monthsBd.intValue());
       val = "'" + intervalYearMonth.toString() + "'";
-      break;
     }
-    case INTERVAL_DAY_TIME: {
+      break;
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_SECOND: {
       type = HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL;
       BigDecimal millisBd = (BigDecimal) literal.getValue();
 
@@ -288,8 +311,8 @@ class ASTBuilder {
       BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
       HiveIntervalDayTime intervalDayTime = new HiveIntervalDayTime(secsBd);
       val = "'" + intervalDayTime.toString() + "'";
-      break;
     }
+      break;
     case NULL:
       type = HiveParser.TOK_NULL;
       break;
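
In the literal-translation hunks above, Calcite represents a year-month interval
literal as a BigDecimal count of months and a day-time interval literal as a
BigDecimal count of milliseconds, while Hive's interval types take months and
seconds respectively, which is why the code divides by 1000. (Moving the break
statements outside the braces is purely cosmetic.) A sketch of the two
conversions, matching the arithmetic in the patch:

    import java.math.BigDecimal;

    import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
    import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;

    final class IntervalLiteralText {

      /** Calcite year-month interval literals carry a total month count. */
      static String yearMonthLiteral(BigDecimal months) {
        return "'" + new HiveIntervalYearMonth(months.intValue()) + "'";
      }

      /**
       * Calcite day-time interval literals carry milliseconds; dividing by 1000
       * yields seconds, and HiveIntervalDayTime keeps the fraction as nanos.
       */
      static String dayTimeLiteral(BigDecimal millis) {
        BigDecimal secs = millis.divide(BigDecimal.valueOf(1000));
        return "'" + new HiveIntervalDayTime(secs) + "'";
      }
    }

So a literal of 93784000 milliseconds (one day, two hours, three minutes, four
seconds) should render as something like '1 02:03:04.000000000'.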


[40/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
index d2ded71..2d3788d 100644
--- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
@@ -7,34 +7,33 @@ set hive.tez.dynamic.partition.pruning=true;
 set hive.optimize.metadataonly=false;
 set hive.optimize.index.filter=true;
 set hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 
 select distinct ds from srcpart;
 select distinct hr from srcpart;
 
-EXPLAIN VECTORIZATION create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
+EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
 create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds;
 create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr;
 create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
 create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr;
 
 -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
@@ -42,77 +41,77 @@ set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 
 -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 
 -- empty set
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = 'I DONT EXIST';
 
 -- expressions
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where cast(hr as string) = 11;
 
 
 -- parent is reduce tasks
-EXPLAIN VECTORIZATION select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- non-equi join
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
 select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
 
 -- old style join syntax
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
 select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
 
 -- left join
-EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- full outer
-EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- with static pruning
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 
 -- union + subquery
-EXPLAIN VECTORIZATION select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
-EXPLAIN VECTORIZATION select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
-EXPLAIN VECTORIZATION select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 
 set hive.auto.convert.join=true;
@@ -120,60 +119,60 @@ set hive.auto.convert.join.noconditionaltask = true;
 set hive.auto.convert.join.noconditionaltask.size = 10000000;
 
 -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 
 -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 
 -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 
 -- empty set
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 
 -- expressions
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
 select count(*) from srcpart where hr = 11;
 
 set hive.stats.fetch.column.stats=false;
 -- parent is reduce tasks
 
-EXPLAIN VECTORIZATION select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 set hive.stats.fetch.column.stats=true;
 
 -- left join
-EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- full outer
-EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 
 -- with static pruning
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 
 -- union + subquery
-EXPLAIN VECTORIZATION select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
 
 
@@ -186,7 +185,7 @@ set hive.vectorized.execution.enabled=false;
 set hive.exec.max.dynamic.partitions=1000;
 
 insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart;
-EXPLAIN VECTORIZATION select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
+EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
 select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
 select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_mapjoin.q b/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
index 138c133..6500d41 100644
--- a/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
@@ -4,11 +4,10 @@ SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.auto.convert.join.noconditionaltask=true;
 SET hive.auto.convert.join.noconditionaltask.size=1000000000;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_mapjoin2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_mapjoin2.q b/ql/src/test/queries/clientpositive/vectorized_mapjoin2.q
index d259547..137acbc 100644
--- a/ql/src/test/queries/clientpositive/vectorized_mapjoin2.q
+++ b/ql/src/test/queries/clientpositive/vectorized_mapjoin2.q
@@ -15,7 +15,7 @@ create temporary table y (b int) stored as orc;
 insert into x values(1);
 insert into y values(1);
 
-explain vectorization expression
+explain
 select count(1) from x, y where a = b;
 
 select count(1) from x, y where a = b;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_math_funcs.q b/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
index 6875909..d79fcce 100644
--- a/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
+++ b/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
@@ -1,10 +1,9 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled = true;
-set hive.fetch.task.conversion=none;
 
 -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression
+explain 
 select
    cdouble
   ,Round(cdouble, 2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q b/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
index 5b07c9f..4332898 100644
--- a/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
@@ -4,10 +4,9 @@ SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.auto.convert.join.noconditionaltask=true;
 SET hive.auto.convert.join.noconditionaltask.size=1000000000;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-explain vectorization select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint;
+explain select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint;
 
 select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_parquet.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_parquet.q b/ql/src/test/queries/clientpositive/vectorized_parquet.q
index e6ebdaa..da138e0 100644
--- a/ql/src/test/queries/clientpositive/vectorized_parquet.q
+++ b/ql/src/test/queries/clientpositive/vectorized_parquet.q
@@ -21,7 +21,7 @@ insert overwrite table alltypes_parquet
   
 SET hive.vectorized.execution.enabled=true;
   
-explain vectorization select * 
+explain select * 
   from alltypes_parquet
   where cint = 528534767 
   limit 10;
@@ -30,7 +30,7 @@ select *
   where cint = 528534767 
   limit 10;
 
-explain vectorization select ctinyint, 
+explain select ctinyint, 
   max(cint), 
   min(csmallint), 
   count(cstring1), 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_parquet_types.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_parquet_types.q b/ql/src/test/queries/clientpositive/vectorized_parquet_types.q
index 68761b6..297c5af 100644
--- a/ql/src/test/queries/clientpositive/vectorized_parquet_types.q
+++ b/ql/src/test/queries/clientpositive/vectorized_parquet_types.q
@@ -48,19 +48,19 @@ SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
 unhex(cbinary), cdecimal FROM parquet_types_staging;
 
 -- select
-explain vectorization expression
+explain
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
 hex(cbinary), cdecimal FROM parquet_types;
 
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
 hex(cbinary), cdecimal FROM parquet_types;
 
-explain vectorization expression
+explain
 SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types;
 
 SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types;
 
-explain vectorization expression
+explain
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_ptf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_ptf.q b/ql/src/test/queries/clientpositive/vectorized_ptf.q
index e648320..64082e9 100644
--- a/ql/src/test/queries/clientpositive/vectorized_ptf.q
+++ b/ql/src/test/queries/clientpositive/vectorized_ptf.q
@@ -1,5 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -43,7 +42,7 @@ insert into table part_orc select * from part_staging;
 
 --1. test1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -64,7 +63,7 @@ from noop(on part_orc
 
 -- 2. testJoinWithNoop
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
 from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
@@ -81,7 +80,7 @@ sort by j.p_name)
 
 -- 3. testOnlyPTF
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size
 from noop(on part_orc
 partition by p_mfgr
@@ -94,7 +93,7 @@ order by p_name);
 
 -- 4. testPTFAlias
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -115,7 +114,7 @@ from noop(on part_orc
 
 -- 5. testPTFAndWhereWithWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -138,7 +137,7 @@ from noop(on part_orc
 
 -- 6. testSWQAndPTFAndGBy
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -163,7 +162,7 @@ group by p_mfgr, p_name, p_size
 
 -- 7. testJoin
 
-explain vectorization extended
+explain extended
 select abc.* 
 from noop(on part_orc 
 partition by p_mfgr 
@@ -178,7 +177,7 @@ order by p_name
 
 -- 8. testJoinRight
 
-explain vectorization extended
+explain extended
 select abc.* 
 from part_orc p1 join noop(on part_orc 
 partition by p_mfgr 
@@ -193,7 +192,7 @@ order by p_name
 
 -- 9. testNoopWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
 from noopwithmap(on part_orc
@@ -208,7 +207,7 @@ order by p_name, p_size desc);
 
 -- 10. testNoopWithMapWithWindowing 
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -227,7 +226,7 @@ from noopwithmap(on part_orc
   
 -- 11. testHavingWithWindowingPTFNoGBY
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
@@ -248,7 +247,7 @@ order by p_name)
   
 -- 12. testFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -269,7 +268,7 @@ order by p_mfgr, p_name
  
 -- 13. testPTFAndWindowingInSubQ
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name, 
 sub1.cd, sub1.s1 
 from (select p_mfgr, p_name, 
@@ -296,7 +295,7 @@ window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2
 
 -- 14. testPTFJoinWithWindowingWithCount
 
-explain vectorization extended
+explain extended
 select abc.p_mfgr, abc.p_name, 
 rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, 
 dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, 
@@ -323,7 +322,7 @@ order by p_name
 
 -- 15. testDistinctInSelectWithPTF
 
-explain vectorization extended
+explain extended
 select DISTINCT p_mfgr, p_name, p_size 
 from noop(on part_orc 
 partition by p_mfgr 
@@ -342,7 +341,7 @@ sum(p_retailprice) as s
 from part_orc 
 group by p_mfgr, p_brand;
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_brand, s, 
 sum(s) over w1  as s1
 from noop(on mfgr_price_view 
@@ -376,7 +375,7 @@ dr INT,
 cud DOUBLE, 
 fv1 INT);
 
-explain vectorization extended
+explain extended
 from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
@@ -413,7 +412,7 @@ select * from part_5;
 
 -- 18. testMulti2OperatorsFunctionChainWithMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -448,7 +447,7 @@ from noop(on
 
 -- 19. testMulti3OperatorsFunctionChain
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -483,7 +482,7 @@ from noop(on
         
 -- 20. testMultiOperatorChainWithNoWindowing
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
@@ -515,7 +514,7 @@ from noop(on
 
 -- 21. testMultiOperatorChainEndsWithNoopMap
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name) as dr, 
@@ -550,7 +549,7 @@ from noopwithmap(on
 
 -- 22. testMultiOperatorChainWithDiffPartitionForWindow1
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, 
 dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, 
@@ -583,7 +582,7 @@ from noop(on
 
 -- 23. testMultiOperatorChainWithDiffPartitionForWindow2
 
-explain vectorization extended
+explain extended
 select p_mfgr, p_name,  
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q b/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
index 9227de0..f57d062 100644
--- a/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
+++ b/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
@@ -2,11 +2,10 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=false;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint order by CNT;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_string_funcs.q b/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
index ee95c0b..d04a3c3 100644
--- a/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
+++ b/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
@@ -1,10 +1,9 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled = true;
-set hive.fetch.task.conversion=none;
 
 -- Test string functions in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
    substr(cstring1, 1, 2)
   ,substr(cstring1, 2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_timestamp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp.q b/ql/src/test/queries/clientpositive/vectorized_timestamp.q
index ceee2ee..2784b7a 100644
--- a/ql/src/test/queries/clientpositive/vectorized_timestamp.q
+++ b/ql/src/test/queries/clientpositive/vectorized_timestamp.q
@@ -6,23 +6,23 @@ CREATE TABLE test(ts TIMESTAMP) STORED AS ORC;
 INSERT INTO TABLE test VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999');
 
 SET hive.vectorized.execution.enabled = false;
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ts FROM test;
 
 SELECT ts FROM test;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test;
 
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test;
 
 SET hive.vectorized.execution.enabled = true;
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT ts FROM test;
 
 SELECT ts FROM test;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test;
 
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
index afbc18a..aaf85fc 100644
--- a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
+++ b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
 -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end.
 -- Turning on vectorization has been temporarily moved to after filling the test table
 -- due to bug HIVE-8197.
@@ -24,7 +23,7 @@ INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1;
 INSERT INTO TABLE alltypesorc_wrong SELECT '2000:01:01 00-00-00' FROM alltypesorc LIMIT 1;
 INSERT INTO TABLE alltypesorc_wrong SELECT '0000-00-00 99:99:99' FROM alltypesorc LIMIT 1;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) AS c1,
   year(ctimestamp1),
   month(ctimestamp1),
@@ -50,7 +49,7 @@ SELECT
 FROM alltypesorc_string
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -76,7 +75,7 @@ SELECT
 FROM alltypesorc_string
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1,
   year(ctimestamp1) = year(stimestamp1),
   month(ctimestamp1) = month(stimestamp1),
@@ -104,7 +103,7 @@ FROM alltypesorc_string
 ORDER BY c1;
 
 -- Wrong format. Should all be NULL.
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -130,7 +129,7 @@ SELECT
 FROM alltypesorc_wrong
 ORDER BY c1;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   min(ctimestamp1),
   max(ctimestamp1),
   count(ctimestamp1),
@@ -145,7 +144,7 @@ SELECT
 FROM alltypesorc_string;
 
 -- SUM of timestamps is not vectorized reduce-side because it produces a double instead of a long (HIVE-8211)...
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   round(sum(ctimestamp1), 3)
 FROM alltypesorc_string;
 
@@ -153,7 +152,7 @@ SELECT
  round(sum(ctimestamp1), 3)
 FROM alltypesorc_string;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   round(avg(ctimestamp1), 0),
   variance(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,
   var_pop(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vectorized_timestamp_ints_casts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp_ints_casts.q b/ql/src/test/queries/clientpositive/vectorized_timestamp_ints_casts.q
index e6e6d5d..15964c9 100644
--- a/ql/src/test/queries/clientpositive/vectorized_timestamp_ints_casts.q
+++ b/ql/src/test/queries/clientpositive/vectorized_timestamp_ints_casts.q
@@ -1,9 +1,8 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled = true;
 SET hive.int.timestamp.conversion.in.seconds=false;
-set hive.fetch.task.conversion=none;
 
-explain vectorization expression
+explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)
@@ -41,7 +40,7 @@ where cbigint % 250 = 0;
 
 SET hive.int.timestamp.conversion.in.seconds=true;
 
-explain vectorization expression
+explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
index 5b74e0b..d8032d8 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
@@ -83,73 +83,25 @@ POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SIMPLE [(valu
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_permute_select
-                  Statistics: Num rows: 2 Data size: 202 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=101)
+            default@part_add_int_permute_select,part_add_int_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_permute_select
@@ -250,73 +202,25 @@ POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c EXPRES
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_string_permute_select
-                  Statistics: Num rows: 2 Data size: 290 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=145)
+            default@part_add_int_string_permute_select,part_add_int_string_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_string_permute_select
@@ -479,73 +383,25 @@ POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	double1	double1	double1	_c4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 2130 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=5 width=426)
+            default@part_change_string_group_double,part_change_string_group_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
@@ -645,73 +501,25 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_date_group_string_group_date_timestamp
-                  Statistics: Num rows: 6 Data size: 3521 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), c6 (type: string), c7 (type: char(50)), c8 (type: char(15)), c9 (type: varchar(50)), c10 (type: varchar(15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:string, c2:char(50), c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), c8:char(15), c9:varchar(50), c10:varchar(15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=586)
+            default@part_change_date_group_string_group_date_timestamp,part_change_date_group_string_group_date_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
@@ -888,73 +696,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 6 Data size: 2903 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22"]
+          TableScan [TS_0] (rows=6 width=483)
+            default@part_change_numeric_group_string_group_multi_ints_string_group,part_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1113,73 +873,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 6 Data size: 4540 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
+          TableScan [TS_0] (rows=6 width=756)
+            default@part_change_numeric_group_string_group_floating_string_group,part_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
@@ -1326,73 +1038,25 @@ POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_string_group_string
-                  Statistics: Num rows: 6 Data size: 6682 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 (type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:char(50), c2:char(9), c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, c8:char(50), c9:char(9), c10:string, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=1113)
+            default@part_change_string_group_string_group_string,part_change_string_group_string_group_string,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
@@ -1573,73 +1237,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint P
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint
-                  Statistics: Num rows: 6 Data size: 1419 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 (type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 (type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 20
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    dataColumns: insert_num:int, c1:smallint, c2:int, c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
+          TableScan [TS_0] (rows=6 width=236)
+            default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint,part_change_lower_to_higher_numeric_group_tinyint_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
@@ -1750,73 +1366,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PA
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_decimal_to_float
-                  Statistics: Num rows: 6 Data size: 1523 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:float, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=6 width=253)
+            default@part_change_lower_to_higher_numeric_group_decimal_to_float,part_change_lower_to_higher_numeric_group_decimal_to_float,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java
deleted file mode 100644
index e0a6198..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical;
-
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-
-/**
- * Why a node did not vectorize.
- *
- */
-public class VectorizerReason  {
-
-  private static long serialVersionUID = 1L;
-
-  public static enum VectorizerNodeIssue {
-    NONE,
-    NODE_ISSUE,
-    OPERATOR_ISSUE,
-    EXPRESSION_ISSUE
-  }
-
-  private final VectorizerNodeIssue vectorizerNodeIssue;
-
-  private final Operator<? extends OperatorDesc> operator;
-
-  private final String expressionTitle;
-
-  private final String issue;
-
-  private VectorizerReason(VectorizerNodeIssue vectorizerNodeIssue,
-      Operator<? extends OperatorDesc> operator, String expressionTitle, String issue) {
-    this.vectorizerNodeIssue = vectorizerNodeIssue;
-    this.operator = operator;
-    this.expressionTitle = expressionTitle;
-    this.issue = issue;
-  }
-
-  public static VectorizerReason createNodeIssue(String issue) {
-    return new VectorizerReason(
-        VectorizerNodeIssue.NODE_ISSUE,
-        null,
-        null,
-        issue);
-  }
-
-  public static VectorizerReason createOperatorIssue(Operator<? extends OperatorDesc> operator,
-      String issue) {
-    return new VectorizerReason(
-        VectorizerNodeIssue.OPERATOR_ISSUE,
-        operator,
-        null,
-        issue);
-  }
-
-  public static VectorizerReason createExpressionIssue(Operator<? extends OperatorDesc> operator,
-      String expressionTitle, String issue) {
-    return new VectorizerReason(
-        VectorizerNodeIssue.EXPRESSION_ISSUE,
-        operator,
-        expressionTitle,
-        issue);
-  }
-
-  @Override
-  public VectorizerReason clone() {
-    return new VectorizerReason(vectorizerNodeIssue, operator, expressionTitle, issue);
-  }
-
-  public VectorizerNodeIssue getVectorizerNodeIssue() {
-    return vectorizerNodeIssue;
-  }
-
-  public Operator<? extends OperatorDesc> getOperator() {
-    return operator;
-  }
-
-  public String getExpressionTitle() {
-    return expressionTitle;
-  }
-
-  public String getIssue() {
-    return issue;
-  }
-
-  @Override
-  public String toString() {
-    String reason;
-    switch (vectorizerNodeIssue) {
-    case NODE_ISSUE:
-      reason = (issue == null ? "unknown" : issue);
-      break;
-    case OPERATOR_ISSUE:
-      reason = (operator == null ? "Unknown" : operator.getType()) + " operator: " +
-           (issue == null ? "unknown" : issue);
-      break;
-    case EXPRESSION_ISSUE:
-      reason = expressionTitle + " expression for " +
-          (operator == null ? "Unknown" : operator.getType()) + " operator: " +
-              (issue == null ? "unknown" : issue);
-      break;
-    default:
-      reason = "Unknown " + vectorizerNodeIssue;
-    }
-    return reason;
-  }
-}
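
For orientation, here is a minimal sketch of how the factory methods of the class deleted above were invoked. Only the VectorizerReason API itself comes from the reverted file; the caller class, the operator variable, and the issue strings are hypothetical.

    // Hypothetical caller; only the VectorizerReason API comes from the file above.
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
    import org.apache.hadoop.hive.ql.plan.OperatorDesc;

    class VectorizerReasonSketch {
      static VectorizerReason nodeLevel() {
        // NODE_ISSUE: toString() renders just the issue text.
        return VectorizerReason.createNodeIssue("hypothetical node-level issue text");
      }

      static VectorizerReason operatorLevel(Operator<? extends OperatorDesc> op) {
        // OPERATOR_ISSUE: toString() renders "<operator type> operator: <issue>".
        return VectorizerReason.createOperatorIssue(op, "hypothetical operator issue text");
      }
    }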

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
index 1f118dc..4a8ff15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainConfiguration.java
@@ -27,27 +27,12 @@ import org.apache.hadoop.fs.Path;
  */
 
 public class ExplainConfiguration {
-
-  public enum VectorizationDetailLevel {
-
-    SUMMARY(4), OPERATOR(3), EXPRESSION(2), DETAIL(1);
-
-    public final int rank;
-    VectorizationDetailLevel(int rank) {
-      this.rank = rank;
-    }
-  };
-
   private boolean extended = false;
   private boolean formatted = false;
   private boolean dependency = false;
   private boolean logical = false;
   private boolean authorize = false;
   private boolean userLevelExplain = false;
-  private boolean vectorization = false;
-  private boolean vectorizationOnly = false;
-  private VectorizationDetailLevel vectorizationDetailLevel = VectorizationDetailLevel.SUMMARY;
-
   private Path explainRootPath;
   private Map<String, Long> opIdToRuntimeNumRows;
 
@@ -113,30 +98,6 @@ public class ExplainConfiguration {
     this.userLevelExplain = userLevelExplain;
   }
 
-  public boolean isVectorization() {
-    return vectorization;
-  }
-
-  public void setVectorization(boolean vectorization) {
-    this.vectorization = vectorization;
-  }
-
-  public boolean isVectorizationOnly() {
-    return vectorizationOnly;
-  }
-
-  public void setVectorizationOnly(boolean vectorizationOnly) {
-    this.vectorizationOnly = vectorizationOnly;
-  }
-
-  public VectorizationDetailLevel getVectorizationDetailLevel() {
-    return vectorizationDetailLevel;
-  }
-
-  public void setVectorizationDetailLevel(VectorizationDetailLevel vectorizationDetailLevel) {
-    this.vectorizationDetailLevel = vectorizationDetailLevel;
-  }
-
   public Path getExplainRootPath() {
     return explainRootPath;
   }
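
A hedged aside on the enum removed above: rank orders the levels from most detailed (DETAIL, rank 1) to least (SUMMARY, rank 4), so a single integer comparison can decide whether an annotation prints at the requested level. The helper below is an illustration under that assumption, not Hive code.

    // Hypothetical helper; only VectorizationDetailLevel and its rank field
    // come from the enum removed above.
    static boolean shouldDisplay(ExplainConfiguration.VectorizationDetailLevel annotated,
                                 ExplainConfiguration.VectorizationDetailLevel requested) {
      // Requesting DETAIL (rank 1) admits every level; requesting SUMMARY
      // (rank 4) admits only SUMMARY-level annotations.
      return annotated.rank >= requested.rank;
    }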

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index f62cf9a..300542e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
@@ -71,9 +70,7 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
   @SuppressWarnings("unchecked")
   @Override
   public void analyzeInternal(ASTNode ast) throws SemanticException {
-    final int childCount = ast.getChildCount();
-    int i = 1;   // Skip TOK_QUERY.
-    while (i < childCount) {
+    for (int i = 1; i < ast.getChildCount(); i++) {
       int explainOptions = ast.getChild(i).getType();
       if (explainOptions == HiveParser.KW_FORMATTED) {
         config.setFormatted(true);
@@ -88,40 +85,7 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
       } else if (explainOptions == HiveParser.KW_ANALYZE) {
         config.setAnalyze(AnalyzeState.RUNNING);
         config.setExplainRootPath(ctx.getMRTmpPath());
-      } else if (explainOptions == HiveParser.KW_VECTORIZATION) {
-        config.setVectorization(true);
-        if (i + 1 < childCount) {
-          int vectorizationOption = ast.getChild(i + 1).getType();
-
-          // [ONLY]
-          if (vectorizationOption == HiveParser.TOK_ONLY) {
-            config.setVectorizationOnly(true);
-            i++;
-            if (i + 1 >= childCount) {
-              break;
-            }
-            vectorizationOption = ast.getChild(i + 1).getType();
-          }
-
-          // [SUMMARY|OPERATOR|EXPRESSION|DETAIL]
-          if (vectorizationOption == HiveParser.TOK_SUMMARY) {
-            config.setVectorizationDetailLevel(VectorizationDetailLevel.SUMMARY);
-            i++;
-          } else if (vectorizationOption == HiveParser.TOK_OPERATOR) {
-            config.setVectorizationDetailLevel(VectorizationDetailLevel.OPERATOR);
-            i++;
-          } else if (vectorizationOption == HiveParser.TOK_EXPRESSION) {
-            config.setVectorizationDetailLevel(VectorizationDetailLevel.EXPRESSION);
-            i++;
-          } else if (vectorizationOption == HiveParser.TOK_DETAIL) {
-            config.setVectorizationDetailLevel(VectorizationDetailLevel.DETAIL);
-            i++;
-          }
-        }
-      } else {
-        // UNDONE: UNKNOWN OPTION?
       }
-      i++;
     }
 
     ctx.setExplainConfig(config);
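
To make the reverted option parsing concrete, here is a trace of the child walk for one accepted statement, inferred from the removed loop above (its "Skip TOK_QUERY" comment implies child 0 is the query subtree); treat the exact AST shape as an assumption.

    // Hypothetical trace for "EXPLAIN VECTORIZATION ONLY EXPRESSION <query>":
    //   i=1: KW_VECTORIZATION       -> config.setVectorization(true)
    //        child 2 is TOK_ONLY    -> config.setVectorizationOnly(true); i++
    //        child 3 is TOK_EXPRESSION
    //                               -> config.setVectorizationDetailLevel(EXPRESSION); i++
    //   the outer i++ reaches childCount and the loop exits.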

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 025ea10..5d3fa6a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -335,11 +335,6 @@ KW_KEY: 'KEY';
 KW_ABORT: 'ABORT';
 KW_EXTRACT: 'EXTRACT';
 KW_FLOOR: 'FLOOR';
-KW_VECTORIZATION: 'VECTORIZATION';
-KW_SUMMARY: 'SUMMARY';
-KW_OPERATOR: 'OPERATOR';
-KW_EXPRESSION: 'EXPRESSION';
-KW_DETAIL: 'DETAIL';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index eebd875..5c16c55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -380,11 +380,6 @@ TOK_ROLLBACK;
 TOK_SET_AUTOCOMMIT;
 TOK_CACHE_METADATA;
 TOK_ABORT_TRANSACTIONS;
-TOK_ONLY;
-TOK_SUMMARY;
-TOK_OPERATOR;
-TOK_EXPRESSION;
-TOK_DETAIL;
 }
 
 
@@ -722,28 +717,7 @@ explainStatement
 explainOption
 @init { msgs.push("explain option"); }
 @after { msgs.pop(); }
-    : KW_EXTENDED|KW_FORMATTED|KW_DEPENDENCY|KW_LOGICAL|KW_AUTHORIZATION|KW_ANALYZE|
-      (KW_VECTORIZATION vectorizationOnly? vectorizatonDetail?)
-    ;
-
-vectorizationOnly
-@init { pushMsg("vectorization's only clause", state); }
-@after { popMsg(state); }
-    : KW_ONLY
-    -> ^(TOK_ONLY)
-    ;
-
-vectorizatonDetail
-@init { pushMsg("vectorization's detail level clause", state); }
-@after { popMsg(state); }
-    : KW_SUMMARY
-    -> ^(TOK_SUMMARY)
-    | KW_OPERATOR
-    -> ^(TOK_OPERATOR)
-    | KW_EXPRESSION
-    -> ^(TOK_EXPRESSION)
-    | KW_DETAIL
-    -> ^(TOK_DETAIL)
+    : KW_EXTENDED|KW_FORMATTED|KW_DEPENDENCY|KW_LOGICAL|KW_AUTHORIZATION|KW_ANALYZE
     ;
 
 execStatement

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 13f6879..50987c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -719,12 +719,6 @@ nonReserved
     | KW_VALIDATE
     | KW_NOVALIDATE
     | KW_KEY
-    | KW_VECTORIZATION
-    | KW_SUMMARY
-    | KW_OPERATOR
-    | KW_EXPRESSION
-    | KW_DETAIL
-
 ;
 
 //The following SQL2011 reserved keywords are used as function name only, but not as identifiers.

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
index 2c14203..e217bdf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
@@ -29,10 +29,6 @@ import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 public class AbstractOperatorDesc implements OperatorDesc {
 
   protected boolean vectorMode = false;
-
-  // Extra parameters only for vectorization.
-  protected VectorDesc vectorDesc;
-
   protected Statistics statistics;
   protected transient OpTraits opTraits;
   protected transient Map<String, String> opProps;
@@ -68,14 +64,6 @@ public class AbstractOperatorDesc implements OperatorDesc {
     this.vectorMode = vm;
   }
 
-  public void setVectorDesc(VectorDesc vectorDesc) {
-    this.vectorDesc = vectorDesc;
-  }
-
-  public VectorDesc getVectorDesc() {
-    return vectorDesc;
-  }
-
   @Override
   public OpTraits getTraits() {
     return opTraits;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java
index 4304b11..5157ebd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java
@@ -18,24 +18,10 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import org.apache.hadoop.hive.ql.exec.Operator;
-
 public class AbstractVectorDesc implements VectorDesc {
 
-  private static long serialVersionUID = 1L;
-
-  private Class<?> vectorOpClass;
-
   @Override
   public Object clone() throws CloneNotSupportedException {
     throw new CloneNotSupportedException("clone not supported");
   }
-
-  public void setVectorOp(Class<?> vectorOpClass) {
-    this.vectorOpClass = vectorOpClass;
-  }
-
-  public Class<?> getVectorOpClass() {
-    return vectorOpClass;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/AppMasterEventDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AppMasterEventDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AppMasterEventDesc.java
index c5294f0..264f959 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AppMasterEventDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AppMasterEventDesc.java
@@ -19,10 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.IOException;
-import java.util.List;
 
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.io.DataOutputBuffer;
 
 
@@ -63,25 +60,4 @@ public class AppMasterEventDesc extends AbstractOperatorDesc {
   public void writeEventHeader(DataOutputBuffer buffer) throws IOException {
     // nothing to add
   }
-
-  public class AppMasterEventOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final AppMasterEventDesc appMasterEventDesc;
-    private final VectorAppMasterEventDesc vectorAppMasterEventDesc;
-
-    public AppMasterEventOperatorExplainVectorization(AppMasterEventDesc appMasterEventDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-      this.appMasterEventDesc = appMasterEventDesc;
-      vectorAppMasterEventDesc = (VectorAppMasterEventDesc) vectorDesc;
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "App Master Event Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public AppMasterEventOperatorExplainVectorization getAppMasterEventVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new AppMasterEventOperatorExplainVectorization(this, vectorDesc);
-  }
 }
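
The block removed above is one instance of a pattern the reverted patch applied across operator descriptors: a nested OperatorExplainVectorization wrapper surfaced through an @Explain-annotated getter that returns null when the Vectorizer attached no VectorDesc. A generic sketch follows; MyOperatorExplainVectorization and the display name are placeholder assumptions.

    // Hypothetical descriptor method mirroring the reverted pattern above.
    @Explain(vectorization = Vectorization.OPERATOR,
        displayName = "My Operator Vectorization",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public MyOperatorExplainVectorization getMyOperatorVectorization() {
      if (vectorDesc == null) {
        return null;   // not vectorized: omit this section from EXPLAIN output
      }
      return new MyOperatorExplainVectorization(this, vectorDesc);
    }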

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
index b061d5e..13a0811 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -34,9 +33,7 @@ import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 
@@ -68,25 +65,12 @@ public abstract class BaseWork extends AbstractOperatorDesc {
 
   private String name;
 
-  /*
-   * Vectorization.
-   */
-
-  // This will be true if a node was examined by the Vectorizer class.
-  protected boolean vectorizationExamined;
-
-  protected boolean vectorizationEnabled;
+  // Vectorization.
 
   protected VectorizedRowBatchCtx vectorizedRowBatchCtx;
 
   protected boolean useVectorizedInputFileFormat;
 
-  private VectorizerReason notVectorizedReason;
-
-  private boolean groupByVectorOutput;
-  private boolean allNative;
-  private boolean usesVectorUDFAdaptor;
-
   protected boolean llapMode = false;
   protected boolean uberMode = false;
 
@@ -179,22 +163,6 @@ public abstract class BaseWork extends AbstractOperatorDesc {
 
   // -----------------------------------------------------------------------------------------------
 
-  public void setVectorizationExamined(boolean vectorizationExamined) {
-    this.vectorizationExamined = vectorizationExamined;
-  }
-
-  public boolean getVectorizationExamined() {
-    return vectorizationExamined;
-  }
-
-  public void setVectorizationEnabled(boolean vectorizationEnabled) {
-    this.vectorizationEnabled = vectorizationEnabled;
-  }
-
-  public boolean getVectorizationEnabled() {
-    return vectorizationEnabled;
-  }
-
   /*
    * The vectorization context for creating the VectorizedRowBatch for the node.
    */
@@ -206,160 +174,23 @@ public abstract class BaseWork extends AbstractOperatorDesc {
     this.vectorizedRowBatchCtx = vectorizedRowBatchCtx;
   }
 
-  public void setNotVectorizedReason(VectorizerReason notVectorizedReason) {
-    this.notVectorizedReason = notVectorizedReason;
-  }
-
-  public VectorizerReason getNotVectorizedReason() {
-    return notVectorizedReason;
-  }
-
-  public void setGroupByVectorOutput(boolean groupByVectorOutput) {
-    this.groupByVectorOutput = groupByVectorOutput;
-  }
-
-  public boolean getGroupByVectorOutput() {
-    return groupByVectorOutput;
-  }
-
-  public void setUsesVectorUDFAdaptor(boolean usesVectorUDFAdaptor) {
-    this.usesVectorUDFAdaptor = usesVectorUDFAdaptor;
-  }
-
-  public boolean getUsesVectorUDFAdaptor() {
-    return usesVectorUDFAdaptor;
-  }
-
-  public void setAllNative(boolean allNative) {
-    this.allNative = allNative;
+  /*
+   * Whether the HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT variable
+   * (hive.vectorized.use.vectorized.input.format) was true when the Vectorizer class evaluated
+   * vectorizing this node.
+   *
+   * When Vectorized Input File Format looks at this flag, it can determine whether it should
+   * operate vectorized or not.  In some modes, the node can be vectorized but use row
+   * serialization.
+   */
+  public void setUseVectorizedInputFileFormat(boolean useVectorizedInputFileFormat) {
+    this.useVectorizedInputFileFormat = useVectorizedInputFileFormat;
   }
 
-  public boolean getAllNative() {
-    return allNative;
+  public boolean getUseVectorizedInputFileFormat() {
+    return useVectorizedInputFileFormat;
   }
 
-  public static class BaseExplainVectorization {
-
-    private final BaseWork baseWork;
-
-    public BaseExplainVectorization(BaseWork baseWork) {
-      this.baseWork = baseWork;
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabled", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public boolean enabled() {
-      return baseWork.getVectorizationEnabled();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "vectorized", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public Boolean vectorized() {
-      if (!baseWork.getVectorizationEnabled()) {
-        return null;
-      }
-      return baseWork.getVectorMode();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "notVectorizedReason", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String notVectorizedReason() {
-      if (!baseWork.getVectorizationEnabled() || baseWork.getVectorMode()) {
-        return null;
-      }
-      VectorizerReason notVectorizedReason = baseWork.getNotVectorizedReason();
-      if (notVectorizedReason ==  null) {
-        return "Unknown";
-      }
-      return notVectorizedReason.toString();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "groupByVectorOutput", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public Boolean groupByRowOutputCascade() {
-      if (!baseWork.getVectorMode()) {
-        return null;
-      }
-      return baseWork.getGroupByVectorOutput();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "allNative", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public Boolean nativeVectorized() {
-      if (!baseWork.getVectorMode()) {
-        return null;
-      }
-      return baseWork.getAllNative();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "usesVectorUDFAdaptor", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public Boolean usesVectorUDFAdaptor() {
-      if (!baseWork.getVectorMode()) {
-        return null;
-      }
-      return baseWork.getUsesVectorUDFAdaptor();
-    }
-
-    public static class RowBatchContextExplainVectorization {
-
-      private final VectorizedRowBatchCtx vectorizedRowBatchCtx;
-
-      public RowBatchContextExplainVectorization(VectorizedRowBatchCtx vectorizedRowBatchCtx) {
-        this.vectorizedRowBatchCtx = vectorizedRowBatchCtx;
-      }
-
-      private List<String> getColumns(int startIndex, int count) {
-        String[] rowColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
-        TypeInfo[] rowColumnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
-        List<String> result = new ArrayList<String>(count);
-        final int end = startIndex + count;
-        for (int i = startIndex; i < end; i++) {
-          result.add(rowColumnNames[i] + ":" + rowColumnTypeInfos[i]);
-        }
-        return result;
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "dataColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public List<String> getDataColumns() {
-        return getColumns(0, vectorizedRowBatchCtx.getDataColumnCount());
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "partitionColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public List<String> getPartitionColumns() {
-        return getColumns(vectorizedRowBatchCtx.getDataColumnCount(), vectorizedRowBatchCtx.getPartitionColumnCount());
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "includeColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public String getDataColumnNums() {
-        int[] dataColumnNums = vectorizedRowBatchCtx.getDataColumnNums();
-        if (dataColumnNums == null) {
-          return null;
-        }
-        return Arrays.toString(vectorizedRowBatchCtx.getDataColumnNums());
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "dataColumnCount", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public int getDataColumnCount() {
-        return vectorizedRowBatchCtx.getDataColumnCount();
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "partitionColumnCount", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public int getPartitionColumnCount() {
-        return vectorizedRowBatchCtx.getPartitionColumnCount();
-      }
-
-      @Explain(vectorization = Vectorization.DETAIL, displayName = "scratchColumnTypeNames", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-      public List<String> getScratchColumnTypeNames() {
-        return Arrays.asList(vectorizedRowBatchCtx.getScratchColumnTypeNames());
-      }
-
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "rowBatchContext", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public RowBatchContextExplainVectorization vectorizedRowBatchContext() {
-      if (!baseWork.getVectorMode()) {
-        return null;
-      }
-      return new RowBatchContextExplainVectorization(baseWork.getVectorizedRowBatchCtx());
-    }
-  }
-
-
   // -----------------------------------------------------------------------------------------------
 
   /**

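The removed RowBatchContextExplainVectorization above reports the row schema by slicing two parallel arrays: data columns occupy indexes [0, dataColumnCount) and partition columns follow immediately after. A small standalone illustration of that slicing, with the same loop as the deleted getColumns(startIndex, count); the column names and types here are illustrative, not taken from any real table:

import java.util.ArrayList;
import java.util.List;

public class RowBatchColumnsSketch {
  // Parallel name/type arrays: data columns first, then partition columns.
  static final String[] NAMES = { "id", "amount", "ds" };
  static final String[] TYPES = { "int", "double", "string" };
  static final int DATA_COLUMN_COUNT = 2;

  static List<String> columns(int start, int count) {
    List<String> result = new ArrayList<String>(count);
    for (int i = start; i < start + count; i++) {
      result.add(NAMES[i] + ":" + TYPES[i]);
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println("dataColumns: " + columns(0, DATA_COLUMN_COUNT));
    // dataColumns: [id:int, amount:double]
    System.out.println("partitionColumns: "
        + columns(DATA_COLUMN_COUNT, NAMES.length - DATA_COLUMN_COUNT));
    // partitionColumns: [ds:string]
  }
}
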
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java
index 7b16ad7..b0b6c3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java
@@ -45,33 +45,4 @@ public @interface Explain {
   boolean displayOnlyOnTrue() default false;
 
   boolean skipHeader() default false;
-
-  // By default, many existing @Explain classes/methods are NON_VECTORIZED.
-  //
-  // Vectorized methods/classes have detail levels:
-  //     SUMMARY, OPERATOR, EXPRESSION, or DETAIL.
-  // As you go to the right you get more detail and the information for the previous level(s) is
-  // included.  The default is SUMMARY.
-  //
-  // The "path" enumerations are used to mark methods/classes that lead to vectorization specific
-  // ones so we can avoid displaying headers for things that have no vectorization information
-  // below.
-  //
-  // For example, the TezWork class is marked SUMMARY_PATH because it leads to both
-  // SUMMARY and OPERATOR methods/classes. And, MapWork.getAllRootOperators is marked OPERATOR_PATH
-  // because we only display operator information for OPERATOR.
-  //
-  // EXPRESSION and DETAIL typically live inside SUMMARY or OPERATOR classes.
-  //
-  public enum Vectorization {
-    SUMMARY_PATH(4), OPERATOR_PATH(3),
-    SUMMARY(4), OPERATOR(3), EXPRESSION(2), DETAIL(1),
-    NON_VECTORIZED(Integer.MAX_VALUE);
-
-    public final int rank;
-    Vectorization(int rank) {
-      this.rank = rank;
-    }
-  };
-  Vectorization vectorization() default Vectorization.NON_VECTORIZED;
 }
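
The enum removed above encodes each detail level as a rank, and its own comment states the intended semantics: levels further to the right carry more detail and include the information of the previous levels, while NON_VECTORIZED members (rank Integer.MAX_VALUE) remain eligible at every level. A sketch of a rank gate consistent with that comment follows; the gate itself is an illustration, not the actual ExplainTask logic:

public class VectorizationRankSketch {
  enum Vectorization {
    SUMMARY_PATH(4), OPERATOR_PATH(3),
    SUMMARY(4), OPERATOR(3), EXPRESSION(2), DETAIL(1),
    NON_VECTORIZED(Integer.MAX_VALUE);

    final int rank;
    Vectorization(int rank) { this.rank = rank; }
  }

  // A member annotated at rank r is displayed when the requested level's rank
  // is <= r: requesting DETAIL (1) includes EXPRESSION, OPERATOR and SUMMARY
  // members, while requesting SUMMARY (4) shows only the summary-ranked ones.
  static boolean isShown(Vectorization annotated, Vectorization requested) {
    return annotated.rank >= requested.rank;
  }

  public static void main(String[] args) {
    System.out.println(isShown(Vectorization.EXPRESSION, Vectorization.SUMMARY)); // false
    System.out.println(isShown(Vectorization.EXPRESSION, Vectorization.DETAIL));  // true
    System.out.println(isShown(Vectorization.NON_VECTORIZED, Vectorization.SUMMARY)); // true
  }
}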

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
index 805357c..9f4767c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration;
-import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 
 /**
@@ -118,18 +117,6 @@ public class ExplainWork implements Serializable {
     return config.isFormatted();
   }
 
-  public boolean isVectorization() {
-    return config.isVectorization();
-  }
-
-  public boolean isVectorizationOnly() {
-    return config.isVectorizationOnly();
-  }
-
-  public VectorizationDetailLevel isVectorizationDetailLevel() {
-    return config.getVectorizationDetailLevel();
-  }
-
   public ParseContext getParseContext() {
     return pCtx;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
index 3c69f69..8ea6440 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
@@ -22,7 +22,6 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
@@ -31,17 +30,14 @@ import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
-import org.apache.hadoop.hive.ql.plan.BaseWork.BaseExplainVectorization;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
 /**
  * FetchWork.
  *
  */
-@Explain(displayName = "Fetch Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-    vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Fetch Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class FetchWork implements Serializable {
   private static final long serialVersionUID = 1L;
 
@@ -311,43 +307,4 @@ public class FetchWork implements Serializable {
 
     return ret;
   }
-
-  // -----------------------------------------------------------------------------------------------
-
-  private boolean vectorizationExamined;
-
-  public void setVectorizationExamined(boolean vectorizationExamined) {
-    this.vectorizationExamined = vectorizationExamined;
-  }
-
-  public boolean getVectorizationExamined() {
-    return vectorizationExamined;
-  }
-
-  public class FetchExplainVectorization {
-
-    private final FetchWork fetchWork;
-
-    public FetchExplainVectorization(FetchWork fetchWork) {
-      this.fetchWork = fetchWork;
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabled", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public boolean enabled() {
-      return false;
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabledConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> enabledConditionsNotMet() {
-      return VectorizationCondition.getConditionsSupported(false);
-    }
-  }
-
-  @Explain(vectorization = Vectorization.SUMMARY, displayName = "Fetch Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public FetchExplainVectorization getMapExplainVectorization() {
-    if (!getVectorizationExamined()) {
-      return null;
-    }
-    return new FetchExplainVectorization(this);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index bbc5f10..07ed4fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
+
 
 /**
  * FileSinkDesc.
@@ -474,19 +474,4 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     this.statsTmpDir = statsCollectionTempDir;
   }
 
-  public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) {
-      // Native vectorization not supported.
-      super(vectorDesc, false);
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "File Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public FileSinkOperatorExplainVectorization getFileSinkVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new FileSinkOperatorExplainVectorization(vectorDesc);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
index ff69775..fa20798 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
@@ -22,7 +22,6 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 
 
@@ -178,7 +177,6 @@ public class FilterDesc extends AbstractOperatorDesc {
     this.syntheticJoinPredicate = syntheticJoinPredicate;
   }
 
-
   @Override
   public Object clone() {
     FilterDesc filterDesc = new FilterDesc(getPredicate().clone(), getIsSamplingPred());
@@ -188,30 +186,4 @@ public class FilterDesc extends AbstractOperatorDesc {
     filterDesc.setSortedFilter(isSortedFilter());
     return filterDesc;
   }
-
-  public class FilterOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final FilterDesc filterDesc;
-    private final VectorFilterDesc vectorFilterDesc;
-
-    public FilterOperatorExplainVectorization(FilterDesc filterDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-      this.filterDesc = filterDesc;
-      vectorFilterDesc = (VectorFilterDesc) vectorDesc;
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "predicateExpression", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getPredicateExpression() {
-      return vectorFilterDesc.getPredicateExpression().toString();
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Filter Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public FilterOperatorExplainVectorization getFilterVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new FilterOperatorExplainVectorization(this, vectorDesc);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
index 204277e..99791e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
@@ -19,18 +19,13 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
-import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
+import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.udf.UDFType;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hive.common.util.AnnotationUtils;
-import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 
 
 /**
@@ -76,8 +71,11 @@ public class GroupByDesc extends AbstractOperatorDesc {
   transient private boolean isDistinct;
   private boolean dontResetAggrsDistinct;
 
+  // Extra parameters only for vectorization.
+  private VectorGroupByDesc vectorDesc;
+
   public GroupByDesc() {
-    vectorDesc = null;
+    vectorDesc = new VectorGroupByDesc();
   }
 
   public GroupByDesc(
@@ -108,7 +106,7 @@ public class GroupByDesc extends AbstractOperatorDesc {
       final boolean groupingSetsPresent,
       final int groupingSetsPosition,
       final boolean isDistinct) {
-    vectorDesc = null;
+    vectorDesc = new VectorGroupByDesc();
     this.mode = mode;
     this.outputColumnNames = outputColumnNames;
     this.keys = keys;
@@ -122,6 +120,14 @@ public class GroupByDesc extends AbstractOperatorDesc {
     this.isDistinct = isDistinct;
   }
 
+  public void setVectorDesc(VectorGroupByDesc vectorDesc) {
+    this.vectorDesc = vectorDesc;
+  }
+
+  public VectorGroupByDesc getVectorDesc() {
+    return vectorDesc;
+  }
+
   public Mode getMode() {
     return mode;
   }
@@ -305,66 +311,4 @@ public class GroupByDesc extends AbstractOperatorDesc {
     this.isDistinct = isDistinct;
   }
 
-  public class GroupByOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final GroupByDesc groupByDesc;
-    private final VectorGroupByDesc vectorGroupByDesc;
-
-    public GroupByOperatorExplainVectorization(GroupByDesc groupByDesc, VectorDesc vectorDesc) {
-      // Native vectorization not supported.
-      super(vectorDesc, false);
-      this.groupByDesc = groupByDesc;
-      vectorGroupByDesc = (VectorGroupByDesc) vectorDesc;
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "keyExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getKeysExpression() {
-      return vectorExpressionsToStringList(vectorGroupByDesc.getKeyExpressions());
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "aggregators", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getAggregators() {
-      VectorAggregateExpression[] vecAggregators = vectorGroupByDesc.getAggregators();
-      List<String> vecAggrList = new ArrayList<String>(vecAggregators.length);
-      for (VectorAggregateExpression vecAggr : vecAggregators) {
-        vecAggrList.add(vecAggr.toString());
-      }
-      return vecAggrList;
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "vectorOutput", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public boolean getGroupByRowOutputCascade() {
-      return vectorGroupByDesc.isVectorOutput();
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "vectorOutputConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getVectorOutputConditionsNotMet() {
-      List<String> results = new ArrayList<String>();
-      VectorAggregateExpression[] vecAggregators = vectorGroupByDesc.getAggregators();
-      for (VectorAggregateExpression vecAggr : vecAggregators) {
-        Category category = Vectorizer.aggregationOutputCategory(vecAggr);
-        if (category != ObjectInspector.Category.PRIMITIVE) {
-          results.add(
-              "Vector output of " + vecAggr.toString() + " output type " + category + " requires PRIMITIVE IS false");
-        }
-      }
-      if (results.size() == 0) {
-        return null;
-      }
-      return results;
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "projectedOutputColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getProjectedOutputColumns() {
-      return Arrays.toString(vectorGroupByDesc.getProjectedOutputColumns());
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Group By Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public GroupByOperatorExplainVectorization getGroupByVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new GroupByOperatorExplainVectorization(this, vectorDesc);
-  }
 }
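
One detail worth calling out in the block removed above: vectorized group-by output is only possible when every aggregate's output category is PRIMITIVE, and getVectorOutputConditionsNotMet() collects one message per offending aggregate, returning null when there are none. A standalone rendering of that check follows; the aggregate names and the STRUCT example are illustrative stand-ins, not the real VectorAggregateExpression API:

import java.util.ArrayList;
import java.util.List;

public class GroupByVectorOutputSketch {
  enum Category { PRIMITIVE, STRUCT, LIST }

  static class Aggregator {
    final String name;
    final Category outputCategory;
    Aggregator(String name, Category outputCategory) {
      this.name = name;
      this.outputCategory = outputCategory;
    }
  }

  // Mirrors the removed getVectorOutputConditionsNotMet(): any aggregate whose
  // output category is not PRIMITIVE blocks vectorized group-by output.
  static List<String> vectorOutputConditionsNotMet(List<Aggregator> aggs) {
    List<String> results = new ArrayList<String>();
    for (Aggregator agg : aggs) {
      if (agg.outputCategory != Category.PRIMITIVE) {
        results.add("Vector output of " + agg.name + " output type "
            + agg.outputCategory + " requires PRIMITIVE IS false");
      }
    }
    return results.isEmpty() ? null : results; // null: vector output is allowed
  }

  public static void main(String[] args) {
    List<Aggregator> aggs = new ArrayList<Aggregator>();
    aggs.add(new Aggregator("minLong(col 0)", Category.PRIMITIVE));
    aggs.add(new Aggregator("avgDouble(col 1)", Category.STRUCT));
    System.out.println(vectorOutputConditionsNotMet(aggs));
  }
}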

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java
index a338319..94ac41e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HashTableSinkDesc.java
@@ -26,7 +26,6 @@ import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java
index 45ec431..8448a41 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java
@@ -17,10 +17,7 @@
  */
 
 package org.apache.hadoop.hive.ql.plan;
-import java.util.List;
-
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 
 /**
@@ -76,19 +73,4 @@ public class LimitDesc extends AbstractOperatorDesc {
     this.leastRows = leastRows;
   }
 
-  public class LimitOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    public LimitOperatorExplainVectorization(LimitDesc limitDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Limit Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public LimitOperatorExplainVectorization getLimitVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new LimitOperatorExplainVectorization(this, vectorDesc);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
index 3633fde..ec35860 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
@@ -20,24 +20,14 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping;
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.OperatorVariation;
 
 /**
  * Map Join operator Descriptor implementation.
@@ -83,16 +73,17 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
   private boolean isHybridHashJoin;
   private boolean isDynamicPartitionHashJoin = false;
 
+  // Extra parameters only for vectorization.
+  private VectorMapJoinDesc vectorDesc;
+
   public MapJoinDesc() {
-    vectorDesc = null;
+    vectorDesc = new VectorMapJoinDesc();
     bigTableBucketNumMapping = new LinkedHashMap<String, Integer>();
   }
 
   public MapJoinDesc(MapJoinDesc clone) {
     super(clone);
-    if (clone.vectorDesc != null) {
-      throw new RuntimeException("Clone with vectorization desc not supported");
-    }
+    vectorDesc = new VectorMapJoinDesc(clone.vectorDesc);
     this.keys = clone.keys;
     this.keyTblDesc = clone.keyTblDesc;
     this.valueTblDescs = clone.valueTblDescs;
@@ -117,7 +108,7 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
       final int posBigTable, final JoinCondDesc[] conds,
       final Map<Byte, List<ExprNodeDesc>> filters, boolean noOuterJoin, String dumpFilePrefix) {
     super(values, outputColumnNames, noOuterJoin, conds, filters, null);
-    vectorDesc = null;
+    vectorDesc = new VectorMapJoinDesc();
     this.keys = keys;
     this.keyTblDesc = keyTblDesc;
     this.valueTblDescs = valueTblDescs;
@@ -128,6 +119,14 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
     initRetainExprList();
   }
 
+  public void setVectorDesc(VectorMapJoinDesc vectorDesc) {
+    this.vectorDesc = vectorDesc;
+  }
+
+  public VectorMapJoinDesc getVectorDesc() {
+    return vectorDesc;
+  }
+
   private void initRetainExprList() {
     retainList = new HashMap<Byte, List<Integer>>();
     Set<Entry<Byte, List<ExprNodeDesc>>> set = super.getExprs().entrySet();
@@ -389,193 +388,4 @@ public class MapJoinDesc extends JoinDesc implements Serializable {
   public void setDynamicPartitionHashJoin(boolean isDistributedHashJoin) {
     this.isDynamicPartitionHashJoin = isDistributedHashJoin;
   }
-
-  // Use LinkedHashSet to give predictable display order.
-  private static Set<String> vectorizableMapJoinNativeEngines =
-      new LinkedHashSet<String>(Arrays.asList("tez", "spark"));
-
-  public class MapJoinOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final MapJoinDesc mapJoinDesc;
-    private final VectorMapJoinDesc vectorMapJoinDesc;
-    private final VectorMapJoinInfo vectorMapJoinInfo;
-
-    private VectorizationCondition[] nativeConditions;
-
-    public MapJoinOperatorExplainVectorization(MapJoinDesc mapJoinDesc, VectorDesc vectorDesc) {
-      // VectorMapJoinOperator is not native vectorized.
-      super(vectorDesc, ((VectorMapJoinDesc) vectorDesc).hashTableImplementationType() != HashTableImplementationType.NONE);
-      this.mapJoinDesc = mapJoinDesc;
-      vectorMapJoinDesc = (VectorMapJoinDesc) vectorDesc;
-      vectorMapJoinInfo = vectorMapJoinDesc.getVectorMapJoinInfo();
-    }
-
-    private VectorizationCondition[] createNativeConditions() {
-
-      boolean enabled = vectorMapJoinDesc.getIsVectorizationMapJoinNativeEnabled();
-
-      String engine = vectorMapJoinDesc.getEngine();
-      String engineInSupportedCondName =
-          HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " " + engine + " IN " + vectorizableMapJoinNativeEngines;
-      boolean engineInSupported = vectorizableMapJoinNativeEngines.contains(engine);
-
-      boolean isFastHashTableEnabled = vectorMapJoinDesc.getIsFastHashTableEnabled();
-      boolean isHybridHashJoin = vectorMapJoinDesc.getIsHybridHashJoin();
-
-      boolean whenFastHashTableThenNoHybrid =
-          (!isFastHashTableEnabled ? true : !isHybridHashJoin);
-
-      VectorizationCondition[] conditions = new VectorizationCondition[] {
-          new VectorizationCondition(
-              enabled,
-              HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED.varname),
-          new VectorizationCondition(
-              engineInSupported,
-              engineInSupportedCondName),
-          new VectorizationCondition(
-              vectorMapJoinDesc.getOneMapJoinCondition(),
-              "One MapJoin Condition"),
-          new VectorizationCondition(
-              !vectorMapJoinDesc.getHasNullSafes(),
-              "No nullsafe"),
-          new VectorizationCondition(
-              vectorMapJoinDesc.getSupportsKeyTypes(),
-              "Supports Key Types"),
-          new VectorizationCondition(
-              !vectorMapJoinDesc.getIsEmptyKey(),
-              "Not empty key"),
-          new VectorizationCondition(
-              whenFastHashTableThenNoHybrid,
-              "When Fast Hash Table, then requires no Hybrid Hash Join"),
-          new VectorizationCondition(
-              vectorMapJoinDesc.getSmallTableExprVectorizes(),
-              "Small table vectorizes"),
-      };
-      return conditions;
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "nativeConditionsMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getNativeConditionsMet() {
-      if (nativeConditions == null) {
-        nativeConditions = createNativeConditions();
-      }
-      return VectorizationCondition.getConditionsMet(nativeConditions);
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "nativeConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getNativeConditionsNotMet() {
-      if (nativeConditions == null) {
-        nativeConditions = createNativeConditions();
-      }
-      return VectorizationCondition.getConditionsNotMet(nativeConditions);
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "bigTableKeyExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getBigTableKeyExpressions() {
-      if (!isNative) {
-        return null;
-      }
-      return vectorExpressionsToStringList(vectorMapJoinInfo.getBigTableKeyExpressions());
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "bigTableKeyColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getBigTableKeyColumns() {
-      if (!isNative) {
-        return null;
-      }
-      int[] bigTableKeyColumnMap = vectorMapJoinInfo.getBigTableKeyColumnMap();
-      if (bigTableKeyColumnMap.length == 0) {
-        return null;
-      }
-      return Arrays.toString(bigTableKeyColumnMap);
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "bigTableValueExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getBigTableValueExpressions() {
-      if (!isNative) {
-        return null;
-      }
-      return vectorExpressionsToStringList(vectorMapJoinInfo.getBigTableValueExpressions());
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "bigTableValueColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getBigTableValueColumns() {
-      if (!isNative) {
-        return null;
-      }
-      int[] bigTableValueColumnMap = vectorMapJoinInfo.getBigTableValueColumnMap();
-      if (bigTableValueColumnMap.length == 0) {
-        return null;
-      }
-      return Arrays.toString(bigTableValueColumnMap);
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "smallTableMapping", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getSmallTableColumns() {
-      if (!isNative) {
-        return null;
-      }
-      return outputColumnsToStringList(vectorMapJoinInfo.getSmallTableMapping());
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "projectedOutputColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getProjectedOutputColumns() {
-      if (!isNative) {
-        return null;
-      }
-      return outputColumnsToStringList(vectorMapJoinInfo.getProjectionMapping());
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "bigTableOuterKeyMapping", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getBigTableOuterKey() {
-      if (!isNative || vectorMapJoinDesc.operatorVariation() != OperatorVariation.OUTER) {
-        return null;
-      }
-      return columnMappingToStringList(vectorMapJoinInfo.getBigTableOuterKeyMapping());
-    }
-
-    @Explain(vectorization = Vectorization.DETAIL, displayName = "bigTableRetainedColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getBigTableRetainedColumns() {
-      if (!isNative) {
-        return null;
-      }
-      return outputColumnsToStringList(vectorMapJoinInfo.getBigTableRetainedMapping());
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "nativeNotSupportedKeyTypes", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getNativeNotSupportedKeyTypes() {
-      return vectorMapJoinDesc.getNotSupportedKeyTypes();
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Map Join Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public MapJoinOperatorExplainVectorization getMapJoinVectorization() {
-    if (vectorDesc == null || this instanceof SMBJoinDesc) {
-      return null;
-    }
-    return new MapJoinOperatorExplainVectorization(this, vectorDesc);
-  }
-
-  public class SMBJoinOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final SMBJoinDesc smbJoinDesc;
-    private final VectorSMBJoinDesc vectorSMBJoinDesc;
-
-    public SMBJoinOperatorExplainVectorization(SMBJoinDesc smbJoinDesc, VectorDesc vectorDesc) {
-      // Native vectorization NOT supported.
-      super(vectorDesc, false);
-      this.smbJoinDesc = smbJoinDesc;
-      vectorSMBJoinDesc = (VectorSMBJoinDesc) vectorDesc;
-    }
-  }
-
-  // Handle dual nature.
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "SMB Map Join Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public SMBJoinOperatorExplainVectorization getSMBJoinVectorization() {
-    if (vectorDesc == null || !(this instanceof SMBJoinDesc)) {
-      return null;
-    }
-    return new SMBJoinOperatorExplainVectorization((SMBJoinDesc) this, vectorDesc);
-  }
-
 }
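
The createNativeConditions() method removed above is an instance of a pattern this patch also removes from ReduceSinkDesc below: every requirement for the native operator is evaluated once into a (flag, name) pair, and EXPLAIN then partitions the same array into nativeConditionsMet and nativeConditionsNotMet. A simplified stand-in for VectorizationCondition showing that split; the condition names come from the removed code, but the rendered "name IS flag" form is an assumption inferred from the addBooleans helper used in MapWork:

import java.util.ArrayList;
import java.util.List;

public class VectorizationConditionSketch {
  static class Condition {
    final boolean flag;
    final String name;
    Condition(boolean flag, String name) { this.flag = flag; this.name = name; }
  }

  // Stand-in for VectorizationCondition.getConditionsMet/NotMet: both walk the
  // same array and keep the entries matching the requested outcome.
  static List<String> select(Condition[] conditions, boolean wantMet) {
    List<String> result = new ArrayList<String>();
    for (Condition c : conditions) {
      if (c.flag == wantMet) {
        result.add(c.name + " IS " + c.flag); // rendered form is an assumption
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Condition[] conditions = {
        new Condition(true, "One MapJoin Condition"),
        new Condition(true, "Supports Key Types"),
        new Condition(false, "No nullsafe"),
    };
    System.out.println("nativeConditionsMet:    " + select(conditions, true));
    System.out.println("nativeConditionsNotMet: " + select(conditions, false));
  }
}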

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 081c511..5cc3663 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -26,7 +26,6 @@ import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
@@ -48,10 +47,8 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
-import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -143,12 +140,6 @@ public class MapWork extends BaseWork {
 
   private VectorizedRowBatch vectorizedRowBatch;
 
-  private VectorizerReason notEnabledInputFileFormatReason;
-
-  private Set<String> vectorizationInputFileFormatClassNameSet;
-  private List<String> vectorizationEnabledConditionsMet;
-  private List<String> vectorizationEnabledConditionsNotMet;
-
   // bitsets can't be correctly serialized by Kryo's default serializer
   // BitSet::wordsInUse is transient, so force dumping into a lower form
   private byte[] includedBuckets;
@@ -366,7 +357,7 @@ public class MapWork extends BaseWork {
     return nameToSplitSample;
   }
 
-  @Explain(displayName = "LLAP IO", vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "LLAP IO")
   public String getLlapIoDesc() {
     return llapIoDesc;
   }
@@ -438,8 +429,7 @@ public class MapWork extends BaseWork {
     }
   }
 
-  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getExecutionMode() {
     if (vectorMode) {
       if (llapMode) {
@@ -469,8 +459,7 @@ public class MapWork extends BaseWork {
   }
 
   @Override
-  @Explain(displayName = "Map Operator Tree", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.OPERATOR_PATH)
+  @Explain(displayName = "Map Operator Tree", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public Set<Operator<? extends OperatorDesc>> getAllRootOperators() {
     Set<Operator<?>> opSet = new LinkedHashSet<Operator<?>>();
 
@@ -727,86 +716,4 @@ public class MapWork extends BaseWork {
   public VectorizedRowBatch getVectorizedRowBatch() {
     return vectorizedRowBatch;
   }
-
-  /*
-   * Whether the HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT variable
-   * (hive.vectorized.use.vectorized.input.format) was true when the Vectorizer class evaluated
-   * vectorizing this node.
-   *
-   * When Vectorized Input File Format looks at this flag, it can determine whether it should
-   * operate vectorized or not.  In some modes, the node can be vectorized but use row
-   * serialization.
-   */
-  public void setUseVectorizedInputFileFormat(boolean useVectorizedInputFileFormat) {
-    this.useVectorizedInputFileFormat = useVectorizedInputFileFormat;
-  }
-
-  public boolean getUseVectorizedInputFileFormat() {
-    return useVectorizedInputFileFormat;
-  }
-
-  public void setNotEnabledInputFileFormatReason(VectorizerReason notEnabledInputFileFormatReason) {
-    this.notEnabledInputFileFormatReason = notEnabledInputFileFormatReason;
-  }
-
-  public VectorizerReason getNotEnabledInputFileFormatReason() {
-    return notEnabledInputFileFormatReason;
-  }
-
-  public void setVectorizationInputFileFormatClassNameSet(Set<String> vectorizationInputFileFormatClassNameSet) {
-    this.vectorizationInputFileFormatClassNameSet = vectorizationInputFileFormatClassNameSet;
-  }
-
-  public Set<String> getVectorizationInputFileFormatClassNameSet() {
-    return vectorizationInputFileFormatClassNameSet;
-  }
-
-  public void setVectorizationEnabledConditionsMet(ArrayList<String> vectorizationEnabledConditionsMet) {
-    this.vectorizationEnabledConditionsMet = VectorizationCondition.addBooleans(vectorizationEnabledConditionsMet, true);
-  }
-
-  public List<String> getVectorizationEnabledConditionsMet() {
-    return vectorizationEnabledConditionsMet;
-  }
-
-  public void setVectorizationEnabledConditionsNotMet(List<String> vectorizationEnabledConditionsNotMet) {
-    this.vectorizationEnabledConditionsNotMet = VectorizationCondition.addBooleans(vectorizationEnabledConditionsNotMet, false);
-  }
-
-  public List<String> getVectorizationEnabledConditionsNotMet() {
-    return vectorizationEnabledConditionsNotMet;
-  }
-
-  public class MapExplainVectorization extends BaseExplainVectorization {
-
-    private final MapWork mapWork;
-
-    public MapExplainVectorization(MapWork mapWork) {
-      super(mapWork);
-      this.mapWork = mapWork;
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "inputFileFormats", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public Set<String> inputFileFormats() {
-      return mapWork.getVectorizationInputFileFormatClassNameSet();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabledConditionsMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> enabledConditionsMet() {
-      return mapWork.getVectorizationEnabledConditionsMet();
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabledConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> enabledConditionsNotMet() {
-      return mapWork.getVectorizationEnabledConditionsNotMet();
-    }
-  }
-
-  @Explain(vectorization = Vectorization.SUMMARY, displayName = "Map Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public MapExplainVectorization getMapExplainVectorization() {
-    if (!getVectorizationExamined()) {
-      return null;
-    }
-    return new MapExplainVectorization(this);
-  }
 }
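
MapWork's removed setters post-process the enabled-condition lists through VectorizationCondition.addBooleans(list, flag) before storing them. That helper is not shown in this patch, so the sketch below is a hypothetical reconstruction inferred from the call sites: it appends the outcome to each condition name so EXPLAIN can print entries such as "hive.vectorized.use.vectorized.input.format IS true".

import java.util.ArrayList;
import java.util.List;

public class AddBooleansSketch {
  // Hypothetical reconstruction of VectorizationCondition.addBooleans(...):
  // tag every condition name with the boolean outcome it was stored under.
  static List<String> addBooleans(List<String> names, boolean value) {
    List<String> result = new ArrayList<String>(names.size());
    for (String name : names) {
      result.add(name + " IS " + value);
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> met = new ArrayList<String>();
    met.add("hive.vectorized.use.vectorized.input.format");
    System.out.println(addBooleans(met, true));
    // [hive.vectorized.use.vectorized.input.format IS true]
  }
}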

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java
index 76b5138..82143a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java
@@ -32,15 +32,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 
 /**
  * MapredLocalWork.
  *
  */
-@Explain(displayName = "Map Reduce Local Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-    vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Map Reduce Local Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class MapredLocalWork implements Serializable {
   private static final long serialVersionUID = 1L;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
index af9adc2..aa7f6ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
@@ -24,15 +24,14 @@ import java.util.List;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
+
 
 
 /**
  * MapredWork.
  *
  */
-@Explain(displayName = "Map Reduce", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-    vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Map Reduce", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class MapredWork extends AbstractOperatorDesc {
   private static final long serialVersionUID = 1L;
 
@@ -41,8 +40,7 @@ public class MapredWork extends AbstractOperatorDesc {
 
   private boolean finalMapRed;
 
-  @Explain(skipHeader = true, displayName = "Map", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(skipHeader = true, displayName = "Map", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public MapWork getMapWork() {
     return mapWork;
   }
@@ -51,8 +49,7 @@ public class MapredWork extends AbstractOperatorDesc {
     this.mapWork = mapWork;
   }
 
-  @Explain(skipHeader = true, displayName = "Reduce", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(skipHeader = true, displayName = "Reduce", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public ReduceWork getReduceWork() {
     return reduceWork;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorExplainVectorization.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorExplainVectorization.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorExplainVectorization.java
deleted file mode 100644
index bdf9859..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorExplainVectorization.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnMapping;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-
-public class OperatorExplainVectorization {
-
-  protected final VectorDesc vectorDesc;
-
-  protected final boolean isNative;
-
-  public OperatorExplainVectorization(VectorDesc vectorDesc, boolean isNative) {
-    this.vectorDesc = vectorDesc;
-    this.isNative = isNative;
-  }
-
-  public List<String> vectorExpressionsToStringList(VectorExpression[] vectorExpressions) {
-    if (vectorExpressions == null) {
-      return null;
-    }
-    List<String> vecExprList = new ArrayList<String>(vectorExpressions.length);
-    for (VectorExpression vecExpr : vectorExpressions) {
-      vecExprList.add(vecExpr.toString());
-    }
-    return vecExprList;
-  }
-
-  public String outputColumnsToStringList(VectorColumnMapping vectorColumnMapping) {
-    final int size = vectorColumnMapping.getCount();
-    if (size == 0) {
-      return null;
-    }
-    int[] outputColumns = vectorColumnMapping.getOutputColumns();
-    return Arrays.toString(outputColumns);
-  }
-
-  public List<String> columnMappingToStringList(VectorColumnMapping vectorColumnMapping) {
-    final int size = vectorColumnMapping.getCount();
-    if (size == 0) {
-      return null;
-    }
-    int[] inputColumns = vectorColumnMapping.getInputColumns();
-    int[] outputColumns = vectorColumnMapping.getOutputColumns();
-    ArrayList<String> result = new ArrayList<String>(size);
-    for (int i = 0; i < size; i++) {
-      result.add(inputColumns[i] + " -> " + outputColumns[i]);
-    }
-    return result;
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "className", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public String getClassName() {
-    return vectorDesc.getVectorOpClass().getSimpleName();
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "native", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public boolean getNative() {
-    return isNative;
-  }
-}
\ No newline at end of file
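
Since the deleted helper class is shown in full above, a quick standalone illustration of what its mapping formatter produces may still be useful: columnMappingToStringList renders equal-length input/output column arrays as "in -> out" pairs and suppresses empty mappings. Same logic as the deleted method, with plain int arrays standing in for VectorColumnMapping:

import java.util.ArrayList;
import java.util.List;

public class ColumnMappingSketch {
  static List<String> columnMappingToStringList(int[] in, int[] out) {
    if (in.length == 0) {
      return null; // empty mappings are omitted from EXPLAIN output
    }
    List<String> result = new ArrayList<String>(in.length);
    for (int i = 0; i < in.length; i++) {
      result.add(in[i] + " -> " + out[i]);
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(columnMappingToStringList(new int[] {0, 2}, new int[] {4, 5}));
    // [0 -> 4, 2 -> 5]
  }
}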

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
index b8c2d42..d7e404c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
@@ -19,18 +19,11 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
 import java.util.List;
-import java.util.Set;
 
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc.ReduceSinkKeyType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -128,6 +121,9 @@ public class ReduceSinkDesc extends AbstractOperatorDesc {
 
   private static transient Logger LOG = LoggerFactory.getLogger(ReduceSinkDesc.class);
 
+  // Extra parameters only for vectorization.
+  private VectorReduceSinkDesc vectorDesc;
+
   public ReduceSinkDesc() {
   }
 
@@ -191,6 +187,14 @@ public class ReduceSinkDesc extends AbstractOperatorDesc {
     return desc;
   }
 
+  public void setVectorDesc(VectorReduceSinkDesc vectorDesc) {
+    this.vectorDesc = vectorDesc;
+  }
+
+  public VectorReduceSinkDesc getVectorDesc() {
+    return vectorDesc;
+  }
+
   public java.util.ArrayList<java.lang.String> getOutputKeyColumnNames() {
     return outputKeyColumnNames;
   }
@@ -486,105 +490,4 @@ public class ReduceSinkDesc extends AbstractOperatorDesc {
     this.hasOrderBy = hasOrderBy;
   }
 
-  // Use LinkedHashSet to give predictable display order.
-  private static Set<String> vectorizableReduceSinkNativeEngines =
-      new LinkedHashSet<String>(Arrays.asList("tez", "spark"));
-
-  public class ReduceSinkOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final ReduceSinkDesc reduceSinkDesc;
-    private final VectorReduceSinkDesc vectorReduceSinkDesc;
-    private final VectorReduceSinkInfo vectorReduceSinkInfo; 
-
-    private VectorizationCondition[] nativeConditions;
-
-    public ReduceSinkOperatorExplainVectorization(ReduceSinkDesc reduceSinkDesc, VectorDesc vectorDesc) {
-      // VectorReduceSinkOperator is not native vectorized.
-      super(vectorDesc, ((VectorReduceSinkDesc) vectorDesc).reduceSinkKeyType()!= ReduceSinkKeyType.NONE);
-      this.reduceSinkDesc = reduceSinkDesc;
-      vectorReduceSinkDesc = (VectorReduceSinkDesc) vectorDesc;
-      vectorReduceSinkInfo = vectorReduceSinkDesc.getVectorReduceSinkInfo();
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "keyExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getKeyExpression() {
-      if (!isNative) {
-        return null;
-      }
-      return vectorExpressionsToStringList(vectorReduceSinkInfo.getReduceSinkKeyExpressions());
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "valueExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getValueExpression() {
-      if (!isNative) {
-        return null;
-      }
-      return vectorExpressionsToStringList(vectorReduceSinkInfo.getReduceSinkValueExpressions());
-    }
-
-    private VectorizationCondition[] createNativeConditions() {
-
-      boolean enabled = vectorReduceSinkDesc.getIsVectorizationReduceSinkNativeEnabled();
- 
-      String engine = vectorReduceSinkDesc.getEngine();
-      String engineInSupportedCondName =
-          HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " " + engine + " IN " + vectorizableReduceSinkNativeEngines;
-      boolean engineInSupported = vectorizableReduceSinkNativeEngines.contains(engine);
-
-      VectorizationCondition[] conditions = new VectorizationCondition[] {
-          new VectorizationCondition(
-              enabled,
-              HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED.varname),
-          new VectorizationCondition(
-              engineInSupported,
-              engineInSupportedCondName),
-          new VectorizationCondition(
-              !vectorReduceSinkDesc.getAcidChange(),
-              "Not ACID UPDATE or DELETE"),
-          new VectorizationCondition(
-              !vectorReduceSinkDesc.getHasBuckets(),
-              "No buckets"),
-          new VectorizationCondition(
-              !vectorReduceSinkDesc.getHasTopN(),
-              "No TopN"),
-          new VectorizationCondition(
-              vectorReduceSinkDesc.getUseUniformHash(),
-              "Uniform Hash"),
-          new VectorizationCondition(
-              !vectorReduceSinkDesc.getHasDistinctColumns(),
-              "No DISTINCT columns"),
-          new VectorizationCondition(
-              vectorReduceSinkDesc.getIsKeyBinarySortable(),
-              "BinarySortableSerDe for keys"),
-          new VectorizationCondition(
-              vectorReduceSinkDesc.getIsValueLazyBinary(),
-              "LazyBinarySerDe for values")
-      };
-      return conditions;
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "nativeConditionsMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getNativeConditionsMet() {
-      if (nativeConditions == null) {
-        nativeConditions = createNativeConditions();
-      }
-      return VectorizationCondition.getConditionsMet(nativeConditions);
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "nativeConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getNativeConditionsNotMet() {
-      if (nativeConditions == null) {
-        nativeConditions = createNativeConditions();
-      }
-      return VectorizationCondition.getConditionsNotMet(nativeConditions);
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Reduce Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public ReduceSinkOperatorExplainVectorization getReduceSinkVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new ReduceSinkOperatorExplainVectorization(this, vectorDesc);
-  }
 }
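
Note the gating convention shared with the MapJoin wrapper earlier in this patch: the removed constructor treats the operator as native only when the Vectorizer selected a specialized variant, here reduceSinkKeyType() != ReduceSinkKeyType.NONE. A tiny sketch of that gate; only NONE is attested in this patch, and the other enum values are assumptions for illustration:

public class NativeGateSketch {
  enum ReduceSinkKeyType { NONE, LONG, STRING, MULTI_KEY } // values beyond NONE assumed

  // Native only when a specialized key type was chosen by the Vectorizer.
  static boolean isNative(ReduceSinkKeyType keyType) {
    return keyType != ReduceSinkKeyType.NONE;
  }

  public static void main(String[] args) {
    System.out.println(isNative(ReduceSinkKeyType.NONE)); // false
    System.out.println(isNative(ReduceSinkKeyType.LONG)); // true
  }
}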

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
index f4ab2a0..72fc4ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
@@ -19,23 +19,17 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
-import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
-import org.apache.hadoop.hive.ql.plan.BaseWork.BaseExplainVectorization;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -95,9 +89,6 @@ public class ReduceWork extends BaseWork {
   private ObjectInspector keyObjectInspector = null;
   private ObjectInspector valueObjectInspector = null;
 
-  private boolean reduceVectorizationEnabled;
-  private String vectorReduceEngine;
-
   /**
    * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
    * to keySerializeInfo of the ReduceSink
@@ -151,8 +142,7 @@ public class ReduceWork extends BaseWork {
     this.tagToValueDesc = tagToValueDesc;
   }
 
-  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Execution mode", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public String getExecutionMode() {
     if (vectorMode) {
       if (llapMode) {
@@ -170,8 +160,7 @@ public class ReduceWork extends BaseWork {
     return null;
   }
 
-  @Explain(displayName = "Reduce Operator Tree", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.OPERATOR_PATH)
+  @Explain(displayName = "Reduce Operator Tree", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public Operator<?> getReducer() {
     return reducer;
   }
@@ -263,81 +252,4 @@ public class ReduceWork extends BaseWork {
   public void setMaxReduceTasks(int maxReduceTasks) {
     this.maxReduceTasks = maxReduceTasks;
   }
-
-  public void setReduceVectorizationEnabled(boolean reduceVectorizationEnabled) {
-    this.reduceVectorizationEnabled = reduceVectorizationEnabled;
-  }
-
-  public boolean getReduceVectorizationEnabled() {
-    return reduceVectorizationEnabled;
-  }
-
-  public void setVectorReduceEngine(String vectorReduceEngine) {
-    this.vectorReduceEngine = vectorReduceEngine;
-  }
-
-  public String getVectorReduceEngine() {
-    return vectorReduceEngine;
-  }
-
-  // Use LinkedHashSet to give predictable display order.
-  private static Set<String> reduceVectorizableEngines =
-      new LinkedHashSet<String>(Arrays.asList("tez", "spark"));
-
-  public class ReduceExplainVectorization extends BaseExplainVectorization {
-
-    private final ReduceWork reduceWork;
-
-    private VectorizationCondition[] reduceVectorizationConditions;
-
-    public ReduceExplainVectorization(ReduceWork reduceWork) {
-      super(reduceWork);
-      this.reduceWork = reduceWork;
-    }
-
-    private VectorizationCondition[] createReduceExplainVectorizationConditions() {
-
-      boolean enabled = reduceWork.getReduceVectorizationEnabled();
-
-      String engine = reduceWork.getVectorReduceEngine();
-      String engineInSupportedCondName =
-          HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " " + engine + " IN " + reduceVectorizableEngines;
-
-      boolean engineInSupported = reduceVectorizableEngines.contains(engine);
-
-      VectorizationCondition[] conditions = new VectorizationCondition[] {
-          new VectorizationCondition(
-              enabled,
-              HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED.varname),
-          new VectorizationCondition(
-              engineInSupported,
-              engineInSupportedCondName)
-      };
-      return conditions;
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enableConditionsMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getEnableConditionsMet() {
-      if (reduceVectorizationConditions == null) {
-        reduceVectorizationConditions = createReduceExplainVectorizationConditions();
-      }
-      return VectorizationCondition.getConditionsMet(reduceVectorizationConditions);
-    }
-
-    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enableConditionsNotMet", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getEnableConditionsNotMet() {
-      if (reduceVectorizationConditions == null) {
-        reduceVectorizationConditions = createReduceExplainVectorizationConditions();
-      }
-      return VectorizationCondition.getConditionsNotMet(reduceVectorizationConditions);
-    }
-  }
-
-  @Explain(vectorization = Vectorization.SUMMARY, displayName = "Reduce Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public ReduceExplainVectorization getReduceExplainVectorization() {
-    if (!getVectorizationExamined()) {
-      return null;
-    }
-    return new ReduceExplainVectorization(this);
-  }
 }
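
The display pattern being reverted is the same in each of these plan classes: an
@Explain-annotated getter lazily builds its VectorizationCondition array and
returns null when there is nothing to report, and Hive's EXPLAIN machinery skips
null-valued @Explain getters entirely. Condensed from the removed ReduceWork code
above, with explanatory comments added:

    // Condensed from the hunk above (not new behavior); comments added.
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "Reduce Vectorization",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public ReduceExplainVectorization getReduceExplainVectorization() {
      if (!getVectorizationExamined()) {
        // Returning null suppresses the "Reduce Vectorization" section,
        // which is why non-vectorized plans show no such block.
        return null;
      }
      return new ReduceExplainVectorization(this);
    }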

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java
index 0601ce0..67a8327 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java
@@ -19,11 +19,8 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
-
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 
 /**
@@ -138,36 +135,4 @@ public class SelectDesc extends AbstractOperatorDesc {
   public void setSelStarNoCompute(boolean selStarNoCompute) {
     this.selStarNoCompute = selStarNoCompute;
   }
-
-
-  public class SelectOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final SelectDesc selectDesc;
-    private final VectorSelectDesc vectorSelectDesc;
-
-    public SelectOperatorExplainVectorization(SelectDesc selectDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-      this.selectDesc = selectDesc;
-      vectorSelectDesc = (VectorSelectDesc) vectorDesc;
-    }
-
-    @Explain(vectorization = Vectorization.OPERATOR, displayName = "selectExpressions", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public List<String> getSelectExpressions() {
-      return vectorExpressionsToStringList(vectorSelectDesc.getSelectExpressions());
-    }
-
-    @Explain(vectorization = Vectorization.EXPRESSION, displayName = "projectedOutputColumns", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-    public String getProjectedOutputColumns() {
-      return Arrays.toString(vectorSelectDesc.getProjectedOutputColumns());
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Select Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public SelectOperatorExplainVectorization getSelectVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new SelectOperatorExplainVectorization(this, vectorDesc);
-  }
 }
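
SelectOperatorExplainVectorization leans on vectorExpressionsToStringList() from
the OperatorExplainVectorization base class, which is not part of this hunk. A
plausible sketch, inferred from selectExpressions strings such as
"LongColMultiplyLongScalar(col 0, val 2) -> 3:long" in the q.out diffs below;
the real base-class implementation may differ:

    // Hypothetical sketch, not the actual base-class code: render each
    // VectorExpression via its toString(), which is what the EXPLAIN
    // output in the q.out files appears to contain.
    protected List<String> vectorExpressionsToStringList(VectorExpression[] vectorExpressions) {
      if (vectorExpressions == null) {
        return null;
      }
      List<String> vecExprList = new ArrayList<String>(vectorExpressions.length);
      for (VectorExpression vectorExpression : vectorExpressions) {
        vecExprList.add(vectorExpression.toString());
      }
      return vecExprList;
    }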

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkHashTableSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkHashTableSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkHashTableSinkDesc.java
index 260bc07..8833ae3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkHashTableSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkHashTableSinkDesc.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-
 /**
  * Map Join operator Descriptor implementation.
  *
@@ -46,26 +43,4 @@ public class SparkHashTableSinkDesc extends HashTableSinkDesc {
   public void setTag(byte tag) {
     this.tag = tag;
   }
-
-  public class SparkHashTableSinkOperatorExplainVectorization extends OperatorExplainVectorization {
-
-    private final HashTableSinkDesc filterDesc;
-    private final VectorSparkHashTableSinkDesc vectorHashTableSinkDesc;
-
-    public SparkHashTableSinkOperatorExplainVectorization(HashTableSinkDesc filterDesc, VectorDesc vectorDesc) {
-      // Native vectorization supported.
-      super(vectorDesc, true);
-      this.filterDesc = filterDesc;
-      vectorHashTableSinkDesc = (VectorSparkHashTableSinkDesc) vectorDesc;
-    }
-  }
-
-  @Explain(vectorization = Vectorization.OPERATOR, displayName = "Spark Hash Table Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED })
-  public SparkHashTableSinkOperatorExplainVectorization getHashTableSinkVectorization() {
-    if (vectorDesc == null) {
-      return null;
-    }
-    return new SparkHashTableSinkOperatorExplainVectorization(this, vectorDesc);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 066e32d..bb5dd79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -33,7 +34,6 @@ import java.util.Set;
 
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 
 import com.google.common.base.Preconditions;
 
@@ -43,7 +43,7 @@ import com.google.common.base.Preconditions;
  * roots and ReduceWork at all other nodes.
  */
 @SuppressWarnings("serial")
-@Explain(displayName = "Spark", vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Spark")
 public class SparkWork extends AbstractOperatorDesc {
   private static int counter;
   private final String name;
@@ -76,7 +76,7 @@ public class SparkWork extends AbstractOperatorDesc {
   /**
    * @return a map of "vertex name" to BaseWork
    */
-  @Explain(displayName = "Vertices", vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Vertices")
   public Map<String, BaseWork> getWorkMap() {
     Map<String, BaseWork> result = new LinkedHashMap<String, BaseWork>();
     for (BaseWork w: getAllWork()) {


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
index d9e701a..ca07200 100644
--- a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
@@ -32,16 +32,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@orc_table_2a
 POSTHOOK: Lineage: orc_table_2a.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -59,23 +55,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -83,13 +68,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            className: VectorMapJoinInnerBigOnlyLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col1
                         input vertices:
                           1 Map 2
@@ -97,16 +75,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: int)
                           outputColumnNames: _col0
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0]
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -114,66 +85,25 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: c:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: a (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -192,16 +122,12 @@ POSTHOOK: Input: default@orc_table_1a
 POSTHOOK: Input: default@orc_table_2a
 #### A masked pattern was here ####
 3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -219,23 +145,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -243,23 +158,12 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinLeftSemiLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col0
                         input vertices:
                           1 Map 2
                         Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -267,49 +171,19 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: c:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: a (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        Group By Vectorization:
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            keyExpressions: col 0
-                            native: false
-                            projectedOutputColumns: []
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0
@@ -318,26 +192,9 @@ STAGE PLANS:
                           key expressions: _col0 (type: int)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: [0]
-                    dataColumns: a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -392,16 +249,12 @@ POSTHOOK: Input: default@values__tmp__table__4
 POSTHOOK: Output: default@orc_table_2b
 POSTHOOK: Lineage: orc_table_2b.c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: orc_table_2b.v2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -419,23 +272,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -443,14 +285,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col1, _col2
                         input vertices:
                           1 Map 2
@@ -458,16 +292,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: string), _col2 (type: int)
                           outputColumnNames: _col0, _col1
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 0]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -475,68 +302,26 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -555,16 +340,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -582,71 +363,32 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -654,24 +396,12 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [2, 0, 0, 1]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           0 Map 1
                         Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -679,20 +409,6 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
 
   Stage: Stage-0
     Fetch Operator
@@ -711,16 +427,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	3	3	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -738,23 +450,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -762,15 +463,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           1 Map 2
@@ -778,17 +470,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col2 (type: string), (_col3 * 2) (type: int), (_col0 * 5) (type: int), _col1 (type: string)
                           outputColumnNames: _col0, _col1, _col2, _col3
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 3, 4, 1]
-                              selectExpressions: LongColMultiplyLongScalar(col 0, val 2) -> 3:long, LongColMultiplyLongScalar(col 0, val 5) -> 4:long
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -796,68 +480,26 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String, bigint, bigint
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -876,16 +518,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	6	15	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -903,23 +541,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -927,15 +554,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0, 1, 2]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col2
                         input vertices:
                           1 Map 2
@@ -943,16 +561,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col2 (type: string), _col1 (type: string), _col0 (type: int)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 1, 0]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -960,68 +571,26 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -1040,16 +609,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	THREE	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1067,23 +632,12 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1091,15 +645,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [1, 2, 0]
-                            smallTableMapping: [2]
                         outputColumnNames: _col1, _col2, _col3
                         input vertices:
                           1 Map 2
@@ -1107,16 +652,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col3 (type: int), _col2 (type: string), _col1 (type: string)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0, 2, 1]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1124,68 +662,26 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -1204,16 +700,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 3	three	THREE
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1231,71 +723,32 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1303,15 +756,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [0, 1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [2, 0, 1]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col2, _col3
                         input vertices:
                           0 Map 1
@@ -1319,16 +763,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: string), _col3 (type: string), _col2 (type: int)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 1, 0]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1336,20 +773,6 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
 
   Stage: Stage-0
     Fetch Operator
@@ -1368,16 +791,12 @@ POSTHOOK: Input: default@orc_table_1b
 POSTHOOK: Input: default@orc_table_2b
 #### A masked pattern was here ####
 three	THREE	3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1395,71 +814,32 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean
                     predicate: (a > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: v1 (type: string), a (type: int)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 2) -> boolean
                     predicate: (c > 2) (type: boolean)
                     Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: int), v2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -1467,15 +847,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0, 1]
-                            bigTableValueColumns: [1]
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [2, 0, 1]
-                            smallTableMapping: [2]
                         outputColumnNames: _col0, _col1, _col3
                         input vertices:
                           0 Map 1
@@ -1483,16 +854,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col1 (type: int), _col0 (type: string), _col3 (type: string)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [0, 2, 1]
                           Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1500,20 +864,6 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String
 
   Stage: Stage-0
     Fetch Operator
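
Taken together, the q.out hunks above show the user-visible half of this revert: the test queries go back from "explain vectorization detail" to plain "explain", and the expected output loses the PLAN VECTORIZATION header along with the per-operator blocks (TableScan Vectorization, Map Join Vectorization, Map Vectorization, and so on). Plan lines such as "predicateExpression: FilterLongColGreaterLongScalar(col 1, val 2) -> boolean" were composed from an expression's class name plus a short operand summary, and the Java hunks in the next part delete exactly those summary hooks. A minimal sketch of that rendering idea, using hypothetical names rather than Hive's actual classes:

    // Illustrative sketch only; the names here are hypothetical, not Hive's API.
    abstract class Expr {
      // Short operand summary, e.g. "col 1, val 2".
      abstract String parameters();

      // Rendered as "ClassName(parameters) -> type" in an EXPLAIN-style line.
      String describe(String outputType) {
        return getClass().getSimpleName() + "(" + parameters() + ") -> " + outputType;
      }
    }

    class FilterGreaterScalar extends Expr {
      private final int colNum;
      private final long value;

      FilterGreaterScalar(int colNum, long value) {
        this.colNum = colNum;
        this.value = value;
      }

      @Override
      String parameters() {
        return "col " + colNum + ", val " + value;
      }
    }

With these definitions, new FilterGreaterScalar(1, 2).describe("boolean") returns "FilterGreaterScalar(col 1, val 2) -> boolean", the same shape as the removed plan lines.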


[48/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarColumnBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarColumnBase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarColumnBase.java
index 3e4a195..2162f17 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarColumnBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarColumnBase.java
@@ -131,10 +131,4 @@ public abstract class IfExprTimestampScalarColumnBase extends VectorExpression {
   public String getOutputType() {
     return "timestamp";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", col "+ arg3Column;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarScalarBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarScalarBase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarScalarBase.java
index 5273131..707f574 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarScalarBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampScalarScalarBase.java
@@ -117,10 +117,4 @@ public abstract class IfExprTimestampScalarScalarBase extends VectorExpression {
   public String getOutputType() {
     return "timestamp";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", val "+ arg3Scalar;
-  }
-
 }
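
The two IfExpr hunks above remove the same kind of summary for vectorized IF expressions: the scalar/column variant printed "col <cond>, val <then>, col <else>" and the scalar/scalar variant printed "col <cond>, val <then>, val <else>", so the removed string recorded which branches were constants. A hedged sketch of that mapping, reusing the parameter names visible in the deleted lines:

    // Hypothetical sketch of the removed IF-expression summaries.
    // arg1 is the boolean condition column; arg2/arg3 are the THEN/ELSE operands.
    final class IfExprSummaries {
      static String scalarColumn(int arg1Column, Object arg2Scalar, int arg3Column) {
        return "col " + arg1Column + ", val " + arg2Scalar + ", col " + arg3Column;
      }

      static String scalarScalar(int arg1Column, Object arg2Scalar, Object arg3Scalar) {
        return "col " + arg1Column + ", val " + arg2Scalar + ", val " + arg3Scalar;
      }
    }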

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNotNull.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNotNull.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNotNull.java
index 2f6e7b9..f19551e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNotNull.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNotNull.java
@@ -107,11 +107,6 @@ public class IsNotNull extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNull.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNull.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNull.java
index 583ab7a..3169bae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNull.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IsNull.java
@@ -105,11 +105,6 @@ public class IsNull extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongColumn.java
index 6fa9779..33f50e0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongColumn.java
@@ -174,11 +174,6 @@ public class LongColDivideLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongScalar.java
index f26c8e1..68b6a87 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColDivideLongScalar.java
@@ -141,11 +141,6 @@ public class LongColDivideLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongColumn.java
index 3b3c923..a77d41a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongColumn.java
@@ -154,11 +154,6 @@ public class LongColEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongScalar.java
index c174d5f..be717a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColEqualLongScalar.java
@@ -135,10 +135,6 @@ public class LongColEqualLongScalar extends VectorExpression {
     this.outputColumn = outputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
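
One pattern holds across the long run of comparison classes that follows: the Col*Scalar variants summarized operands as "col <n>, val <v>" while the Scalar*Col variants further below used "val <v>, col <n>", so the printed order tracked the operand order in the class name. Some of the removed methods carried @Override while others, as in LongColEqualLongScalar just above, did not; the revert deletes both forms alike. A small runnable illustration of the two orderings (hypothetical demo class, not Hive code):

    // Demonstrates the two operand orderings used by the removed summaries.
    public class OperandOrderDemo {
      public static void main(String[] args) {
        int colNum = 0;
        long value = 2L;
        System.out.println("col " + colNum + ", val " + value); // Col-op-Scalar style
        System.out.println("val " + value + ", col " + colNum); // Scalar-op-Col style
      }
    }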

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongColumn.java
index dd2c3dc..6ee5daf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongColumn.java
@@ -154,11 +154,6 @@ public class LongColGreaterEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongScalar.java
index 710ac23..cd8d723 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterEqualLongScalar.java
@@ -136,11 +136,6 @@ public class LongColGreaterEqualLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongColumn.java
index c8e07f2..053ced9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongColumn.java
@@ -154,11 +154,6 @@ public class LongColGreaterLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongScalar.java
index a234ae1..16148f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColGreaterLongScalar.java
@@ -136,11 +136,6 @@ public class LongColGreaterLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongColumn.java
index 8db8b86..25d52b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongColumn.java
@@ -154,11 +154,6 @@ public class LongColLessEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongScalar.java
index b06a876..927856f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessEqualLongScalar.java
@@ -136,11 +136,6 @@ public class LongColLessEqualLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongColumn.java
index b44e9bd..e6e54e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongColumn.java
@@ -154,11 +154,6 @@ public class LongColLessLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongScalar.java
index ada4312..ac10a83 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColLessLongScalar.java
@@ -136,11 +136,6 @@ public class LongColLessLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongColumn.java
index fa667ca..865fdb9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongColumn.java
@@ -154,11 +154,6 @@ public class LongColNotEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongScalar.java
index 7d16ae0..789ca3e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColNotEqualLongScalar.java
@@ -136,11 +136,6 @@ public class LongColNotEqualLongScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColumnInList.java
index babac22..fb15880 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongColumnInList.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -156,10 +154,6 @@ public class LongColumnInList extends VectorExpression implements ILongInExpr {
     this.inListValues = a;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", values " + Arrays.toString(inListValues);
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
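
The LongColumnInList hunk differs slightly from its neighbors: besides the summary method, it also drops the java.util.Arrays import, which the file needed only for Arrays.toString(inListValues) in the removed string. A minimal runnable sketch of that IN-list rendering (hypothetical demo class, not Hive code):

    import java.util.Arrays;

    // Reproduces the shape of the removed IN-list operand summary.
    public class InListSummaryDemo {
      public static void main(String[] args) {
        int colNum = 3;
        long[] inListValues = {1L, 2L, 5L};
        // Prints: col 3, values [1, 2, 5]
        System.out.println("col " + colNum + ", values " + Arrays.toString(inListValues));
      }
    }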
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarDivideLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarDivideLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarDivideLongColumn.java
index b1958f2..3c442da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarDivideLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarDivideLongColumn.java
@@ -153,11 +153,6 @@ public class LongScalarDivideLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarEqualLongColumn.java
index a4cea31..09d845c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarEqualLongColumn.java
@@ -135,10 +135,6 @@ public class LongScalarEqualLongColumn extends VectorExpression {
     this.outputColumn = outputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterEqualLongColumn.java
index 15ba69b..afc80eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterEqualLongColumn.java
@@ -136,11 +136,6 @@ public class LongScalarGreaterEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterLongColumn.java
index 38984c5..e2e871d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarGreaterLongColumn.java
@@ -136,11 +136,6 @@ public class LongScalarGreaterLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessEqualLongColumn.java
index 47fb591..ae675ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessEqualLongColumn.java
@@ -135,10 +135,6 @@ public class LongScalarLessEqualLongColumn extends VectorExpression {
     this.outputColumn = outputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessLongColumn.java
index d5801d7..c664e35 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarLessLongColumn.java
@@ -136,11 +136,6 @@ public class LongScalarLessLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarNotEqualLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarNotEqualLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarNotEqualLongColumn.java
index b6bbfd1..776ab9f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarNotEqualLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongScalarNotEqualLongColumn.java
@@ -136,11 +136,6 @@ public class LongScalarNotEqualLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongToStringUnaryUDF.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongToStringUnaryUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongToStringUnaryUDF.java
index 80b79a4..fbca683 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongToStringUnaryUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/LongToStringUnaryUDF.java
@@ -135,11 +135,6 @@ abstract public class LongToStringUnaryUDF extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncDoubleToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncDoubleToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncDoubleToDouble.java
index b8e3489..f95c9ec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncDoubleToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncDoubleToDouble.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 public abstract class MathFuncDoubleToDouble extends VectorExpression {
   private static final long serialVersionUID = 1L;
 
-  protected int colNum;
+  private int colNum;
   private int outputColumn;
 
   // Subclasses must override this with a function that implements the desired logic.
@@ -133,9 +133,4 @@ public abstract class MathFuncDoubleToDouble extends VectorExpression {
   public String getOutputType() {
     return "double";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToDouble.java
index 3b55d06..4b4f38d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToDouble.java
@@ -133,9 +133,4 @@ public abstract class MathFuncLongToDouble extends VectorExpression {
   public String getOutputType() {
     return "double";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToLong.java
index 5e36c09..4b1f908 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathFuncLongToLong.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 public abstract class MathFuncLongToLong extends VectorExpression {
   private static final long serialVersionUID = 1L;
 
-  protected int colNum;
+  private int colNum;
   private int outputColumn;
 
   // Subclasses must override this with a function that implements the desired logic.
@@ -125,9 +125,4 @@ public abstract class MathFuncLongToLong extends VectorExpression {
   public String getOutputType() {
     return "long";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NotCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NotCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NotCol.java
index 1ece4a8..ea2a434 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NotCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/NotCol.java
@@ -122,11 +122,6 @@ public class NotCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModDoubleToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModDoubleToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModDoubleToDouble.java
index 0990095..39a3d87 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModDoubleToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModDoubleToDouble.java
@@ -55,11 +55,6 @@ public class PosModDoubleToDouble extends MathFuncDoubleToDouble
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", divisor " + divisor;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModLongToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModLongToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModLongToLong.java
index 4809011..12b7286 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModLongToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/PosModLongToLong.java
@@ -55,11 +55,6 @@ public class PosModLongToLong extends MathFuncLongToLong
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", divisor " + divisor;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(
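
Both PosMod classes above implement Hive's pmod(), whose result, unlike Java's % operator, always takes the sign of the divisor. A one-line scalar sketch of the logic these classes apply per element (illustrative, not this commit's code):

  // pmod(-4, 3) == 2, whereas -4 % 3 == -1 in plain Java.
  static long posMod(long a, long divisor) {
    return ((a % divisor) + divisor) % divisor;
  }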

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/RoundWithNumDigitsDoubleToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/RoundWithNumDigitsDoubleToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/RoundWithNumDigitsDoubleToDouble.java
index 4b791b6..b8dfb41 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/RoundWithNumDigitsDoubleToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/RoundWithNumDigitsDoubleToDouble.java
@@ -59,11 +59,6 @@ public class RoundWithNumDigitsDoubleToDouble extends MathFuncDoubleToDouble
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", decimalPlaces " + decimalPlaces.get();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsFalse.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsFalse.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsFalse.java
index a906bef..77749e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsFalse.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsFalse.java
@@ -138,10 +138,6 @@ public class SelectColumnIsFalse extends VectorExpression {
     this.colNum1 = colNum1;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum1;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNotNull.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNotNull.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNotNull.java
index f8517dd..733e2a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNotNull.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNotNull.java
@@ -108,11 +108,6 @@ public class SelectColumnIsNotNull extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.FILTER)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNull.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNull.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNull.java
index b792bbe..7159178 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNull.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsNull.java
@@ -106,11 +106,6 @@ public class SelectColumnIsNull extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.FILTER)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsTrue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsTrue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsTrue.java
index b58b49e..f387a5c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsTrue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectColumnIsTrue.java
@@ -138,11 +138,6 @@ public class SelectColumnIsTrue extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.FILTER)
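
The getDescriptor() bodies in these hunks are cut off by the diff context, but all of them follow the fluent-builder shape visible at the end of the SelectStringColLikeStringScalar hunk below. A hedged sketch of a complete FILTER-mode descriptor (argument types illustrative):

  @Override
  public VectorExpressionDescriptor.Descriptor getDescriptor() {
    return (new VectorExpressionDescriptor.Builder())
        .setMode(VectorExpressionDescriptor.Mode.FILTER)
        .setNumArguments(1)
        .setArgumentTypes(VectorExpressionDescriptor.ArgumentType.INT_FAMILY)
        .setInputExpressionTypes(VectorExpressionDescriptor.InputExpressionType.COLUMN)
        .build();
  }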

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectStringColLikeStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectStringColLikeStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectStringColLikeStringScalar.java
index cb3870e..b914196 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectStringColLikeStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/SelectStringColLikeStringScalar.java
@@ -162,12 +162,8 @@ public class SelectStringColLikeStringScalar extends VectorExpression {
     return "String_Family";
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-@Override
-public Descriptor getDescriptor() {
+	@Override
+	public Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(
             VectorExpressionDescriptor.Mode.PROJECTION)
@@ -178,6 +174,6 @@ public Descriptor getDescriptor() {
         .setInputExpressionTypes(
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR).build();
-}
+	}
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringColumnInList.java
index b1ceb9a..b90e3c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringColumnInList.java
@@ -175,9 +175,4 @@ public class StringColumnInList extends VectorExpression implements IStringInExp
   public void setInListValues(byte [][] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupColConcatStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupColConcatStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupColConcatStringScalar.java
index bd44390..1cd3c46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupColConcatStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupColConcatStringScalar.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -151,11 +149,6 @@ public class StringGroupColConcatStringScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + new String(value, StandardCharsets.UTF_8);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
index 35666d8..56bc97b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringGroupConcatColCol.java
@@ -440,11 +440,6 @@ public class StringGroupConcatColCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringLength.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringLength.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringLength.java
index cdaf694..76602be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringLength.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringLength.java
@@ -156,10 +156,6 @@ public class StringLength extends VectorExpression {
     this.outputColumn = outputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringScalarConcatStringGroupCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringScalarConcatStringGroupCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringScalarConcatStringGroupCol.java
index b1e1dad..b98f72d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringScalarConcatStringGroupCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringScalarConcatStringGroupCol.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -151,11 +149,6 @@ public class StringScalarConcatStringGroupCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + new String(value, StandardCharsets.UTF_8) + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStart.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStart.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStart.java
index 305d1a7..75a99f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStart.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStart.java
@@ -245,11 +245,6 @@ public class StringSubstrColStart extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", start " + startIdx;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStartLen.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStartLen.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStartLen.java
index 4a7dbdc..0ff7af6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStartLen.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringSubstrColStartLen.java
@@ -272,11 +272,6 @@ public class StringSubstrColStartLen extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", start " + startIdx + ", length " + length;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)
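
The two substr variants above differ only in whether an explicit length accompanies the start index. A hedged sketch of the row-level semantics they vectorize (1-based start, negative start counting back from the end; edge cases such as start == 0 are omitted, and the real classes work on UTF-8 byte ranges rather than java.lang.String):

  static String substr(String s, int start, int len) {
    int begin = start > 0 ? start - 1 : s.length() + start;
    if (begin < 0 || begin >= s.length() || len <= 0) {
      return "";
    }
    return s.substring(begin, Math.min(begin + len, s.length()));
  }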

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDF.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDF.java
index 527d3b3..016a695 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDF.java
@@ -195,11 +195,6 @@ public class StringUnaryUDF extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDFDirect.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDFDirect.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDFDirect.java
index c87371f..89ef251 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDFDirect.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringUnaryUDFDirect.java
@@ -136,10 +136,6 @@ abstract public class StringUnaryUDFDirect extends VectorExpression {
     return "String";
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
index 4d06ad1..769c70a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StructColumnInList.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descriptor;
@@ -173,11 +172,4 @@ public class StructColumnInList extends StringColumnInList implements IStructInE
     }
     this.fieldVectorColumnTypes = fieldVectorColumnTypes;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "structExpressions " + Arrays.toString(structExpressions) +
-        ", fieldVectorColumnTypes " + Arrays.toString(fieldVectorColumnTypes) +
-        ", structColumnMap " + Arrays.toString(structColumnMap);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
index 5e76de8..bc09a3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
 import java.sql.Timestamp;
-import java.util.Arrays;
 import java.util.HashSet;
 
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
@@ -151,9 +150,4 @@ public class TimestampColumnInList extends VectorExpression implements ITimestam
   public void setInListValues(Timestamp[] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampToStringUnaryUDF.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampToStringUnaryUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampToStringUnaryUDF.java
index 32cf527..052d57c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampToStringUnaryUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampToStringUnaryUDF.java
@@ -133,10 +133,6 @@ abstract public class TimestampToStringUnaryUDF extends VectorExpression {
     return "String";
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorCoalesce.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorCoalesce.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorCoalesce.java
index c0870c8..543d7f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorCoalesce.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorCoalesce.java
@@ -141,11 +141,6 @@ public class VectorCoalesce extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "columns " + Arrays.toString(inputColumns);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
 
     // Descriptor is not defined because it takes variable number of arguments with different

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorElt.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorElt.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorElt.java
index 5e0e7aa..329d381 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorElt.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorElt.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
@@ -131,11 +129,6 @@ public class VectorElt extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "columns " + Arrays.toString(inputColumns);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     // Descriptor is not defined because it takes variable number of arguments with different
     // data types.
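
VectorCoalesce and VectorElt both leave the descriptor undefined for the reason given in the comment above, since the builder cannot express a variable argument list. For reference, the row-level semantics being vectorized (illustrative sketch, not this commit's code):

  // COALESCE(a, b, ...) yields the first non-null argument;
  // ELT(n, s1, s2, ...) yields the n-th string (1-based), else null.
  static String elt(int n, String... strings) {
    return (n >= 1 && n <= strings.length) ? strings[n - 1] : null;
  }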

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpression.java
index bf2d4ec..8fca8a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpression.java
@@ -22,7 +22,6 @@ import java.io.Serializable;
 import java.util.Map;
 
 import com.google.common.collect.ImmutableMap;
-
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 
@@ -139,42 +138,25 @@ public abstract class VectorExpression implements Serializable {
     return inputTypes;
   }
 
-  public String vectorExpressionParameters() {
-    return null;
-  }
-
   @Override
   public String toString() {
     StringBuilder b = new StringBuilder();
-    if (this instanceof IdentityExpression) {
-      b.append(vectorExpressionParameters());
-    } else {
-      b.append(this.getClass().getSimpleName());
-      String vectorExpressionParameters = vectorExpressionParameters();
-      if (vectorExpressionParameters != null) {
-        b.append("(");
-        b.append(vectorExpressionParameters);
-        b.append(")");
-      }
-      if (childExpressions != null) {
-        b.append("(children: ");
-        for (int i = 0; i < childExpressions.length; i++) {
-          b.append(childExpressions[i].toString());
-          if (i < childExpressions.length-1) {
-            b.append(", ");
-          }
+    b.append(this.getClass().getSimpleName());
+    b.append("[");
+    b.append(this.getOutputColumn());
+    b.append(":");
+    b.append(this.getOutputType());
+    b.append("]");
+    if (childExpressions != null) {
+      b.append("(");
+      for (int i = 0; i < childExpressions.length; i++) {
+        b.append(childExpressions[i].toString());
+        if (i < childExpressions.length-1) {
+          b.append(" ");
         }
-        b.append(")");
-      }
-      b.append(" -> ");
-      int outputColumn = getOutputColumn();
-      if (outputColumn != -1) {
-        b.append(outputColumn);
-        b.append(":");
       }
-      b.append(getOutputType());
+      b.append(")");
     }
-
     return b.toString();
   }
 }
\ No newline at end of file
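
The restored toString() above renders an expression as ClassName[outputColumn:outputType], recursing into children inside parentheses, while the deleted version rendered ClassName(parameters), an optional "(children: ...)" list, and a trailing " -> outputColumn:outputType". An illustrative comparison for a made-up expression (class name and numbers are examples only):

  // Deleted (reverted-away) format:
  //   LongColAddLongScalar(col 2, val 7) -> 4:long
  // Restored format:
  //   LongColAddLongScalar[4:long]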

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColCol.java
index 00e9e03..4ce6e20 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColCol.java
@@ -216,11 +216,6 @@ public class VectorUDFDateAddColCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColScalar.java
index 730dc36..0e09f49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddColScalar.java
@@ -261,11 +261,6 @@ public class VectorUDFDateAddColScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + numDays;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddScalarCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddScalarCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddScalarCol.java
index 9787ade..724ea45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddScalarCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateAddScalarCol.java
@@ -205,11 +205,6 @@ public class VectorUDFDateAddScalarCol extends VectorExpression {
     this.isPositive = isPositive;
   }
 
-  @Override
-  public String vectorExpressionParameters() {
-    return "val " + stringValue + ", col " + colNum;
-  }
-
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)
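
The three DateAdd variants cover the col/col, col/scalar and scalar/col operand shapes, and the isPositive flag visible in the scalar-col hunk lets the same classes back both date_add and date_sub. A scalar sketch of the per-row semantics (illustrative only; the vectorized classes operate on epoch-day longs rather than LocalDate):

  import java.time.LocalDate;

  static LocalDate dateAdd(LocalDate d, int numDays, boolean isPositive) {
    return d.plusDays(isPositive ? numDays : -numDays);
  }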

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColCol.java
index d3c5da2..4edf558 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColCol.java
@@ -358,11 +358,6 @@ public class VectorUDFDateDiffColCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColScalar.java
index 7e8c19b..71b3887 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffColScalar.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.io.Text;
 
-import java.nio.charset.StandardCharsets;
 import java.sql.Date;
 import java.sql.Timestamp;
 import java.text.ParseException;
@@ -298,11 +297,6 @@ public class VectorUDFDateDiffColScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + new String(stringValue, StandardCharsets.UTF_8);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffScalarCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffScalarCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffScalarCol.java
index 9f2d476..c733bc9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffScalarCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFDateDiffScalarCol.java
@@ -296,11 +296,6 @@ public class VectorUDFDateDiffScalarCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + stringValue + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldDate.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldDate.java
index 0255cfa..0a3a87a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldDate.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hive.common.util.DateUtils;
 
 import com.google.common.base.Preconditions;
 
@@ -151,15 +150,6 @@ public abstract class VectorUDFTimestampFieldDate extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    if (field == -1) {
-      return "col " + colNum;
-    } else {
-      return "col " + colNum + ", field " + DateUtils.getFieldName(field);
-    }
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldString.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldString.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldString.java
index 6719ce3..45e7a31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldString.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldString.java
@@ -177,15 +177,6 @@ public abstract class VectorUDFTimestampFieldString extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    if (fieldStart == -1) {
-      return "col " + colNum;
-    } else {
-      return "col " + colNum + ", fieldStart " + fieldStart + ", fieldLength " + fieldLength;
-    }
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
index e9000c6..5fca678 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hive.common.util.DateUtils;
 
 import com.google.common.base.Preconditions;
 
@@ -149,14 +148,6 @@ public abstract class VectorUDFTimestampFieldTimestamp extends VectorExpression
     this.outputColumn = outputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    if (field == -1) {
-      return "col " + colNum;
-    } else {
-      return "col " + colNum + ", field " + DateUtils.getFieldName(field);
-    }
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
index 0866f63..96e62cf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
@@ -22,7 +22,6 @@ import java.io.Serializable;
 
 import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -56,25 +55,7 @@ public abstract class VectorAggregateExpression  implements Serializable {
   public boolean hasVariableSize() {
     return false;
   }
-  public abstract VectorExpression inputExpression();
 
   public abstract void init(AggregationDesc desc) throws HiveException;
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(this.getClass().getSimpleName());
-    VectorExpression inputExpression = inputExpression();
-    if (inputExpression != null) {
-      sb.append("(");
-      sb.append(inputExpression.toString());
-      sb.append(") -> ");
-    } else {
-      sb.append("(*) -> ");
-    }
-    ObjectInspector outputObjectInspector = getOutputObjectInspector();
-    sb.append(outputObjectInspector.getTypeName());
-    return sb.toString();
-  }
 }
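
With inputExpression() and the custom toString() gone from the base class, aggregate expressions no longer print their input in plan output. Per the code removed above, the deleted toString() produced strings of this shape (column index illustrative):

  //   VectorUDAFCount(col 0) -> bigint       when inputExpression() is non-null
  //   VectorUDAFCountStar(*) -> bigint       when inputExpression() is null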
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
index 05b76c7..d0ff5fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgDecimal.java
@@ -114,12 +114,6 @@ public class VectorUDAFAvgDecimal extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private Object[] partialResult;
     transient private LongWritable resultCount;
     transient private HiveDecimalWritable resultSum;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
index 483d9dc..d0a1d0d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFAvgTimestamp.java
@@ -84,12 +84,6 @@ public class VectorUDAFAvgTimestamp extends VectorAggregateExpression {
     }
     
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private Object[] partialResult;
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
index 494febc..cf373a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCount.java
@@ -60,12 +60,6 @@ public class VectorUDAFCount extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression = null;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private final LongWritable result;
 
     public VectorUDAFCount(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
index dec88cb..577977f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
@@ -61,12 +61,6 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression = null;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private final LongWritable result;
 
     public VectorUDAFCountMerge(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
index 337ba0a..72beda8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountStar.java
@@ -57,13 +57,6 @@ public class VectorUDAFCountStar extends VectorAggregateExpression {
       }
     }
 
-
-    @Override
-    public VectorExpression inputExpression() {
-      // None.
-      return null;
-    }
-
     transient private final LongWritable result;
 
     public VectorUDAFCountStar(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
index 8cd3506..fa25e6a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdPopTimestamp.java
@@ -83,12 +83,6 @@ public class VectorUDAFStdPopTimestamp extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
index 61d6977..b3e1fae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFStdSampTimestamp.java
@@ -83,12 +83,6 @@ public class VectorUDAFStdSampTimestamp extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
index 8a71ce3..3a5fef6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
@@ -88,12 +88,6 @@ public class VectorUDAFSumDecimal extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private final HiveDecimalWritable scratchDecimal;
 
     public VectorUDAFSumDecimal(VectorExpression inputExpression) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
index 2709b07..970ec22 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarPopTimestamp.java
@@ -83,12 +83,6 @@ public class VectorUDAFVarPopTimestamp extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
index 03dce1e..9af1a28 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFVarSampTimestamp.java
@@ -83,12 +83,6 @@ public class VectorUDAFVarSampTimestamp extends VectorAggregateExpression {
     }
 
     private VectorExpression inputExpression;
-
-    @Override
-    public VectorExpression inputExpression() {
-      return inputExpression;
-    }
-
     transient private LongWritable resultCount;
     transient private DoubleWritable resultSum;
     transient private DoubleWritable resultVariance;


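A note on the vector_between_in.q.out diff that follows: the EXPLAIN detail being removed there includes predicateExpression lines such as FilterLongColumnInList(col 3, values [-67, -171]). Those integers line up with the dates in the IN lists because vectorized DATE columns store days since the Unix epoch. A standalone check (plain Java, illustrative only):

  import java.time.LocalDate;
  import java.time.temporal.ChronoUnit;

  public class EpochDays {
    public static void main(String[] args) {
      LocalDate epoch = LocalDate.of(1970, 1, 1);
      // Prints -67, -171 and 20: the values in the removed IN-list filters.
      System.out.println(ChronoUnit.DAYS.between(epoch, LocalDate.parse("1969-10-26")));
      System.out.println(ChronoUnit.DAYS.between(epoch, LocalDate.parse("1969-07-14")));
      System.out.println(ChronoUnit.DAYS.between(epoch, LocalDate.parse("1970-01-21")));
    }
  }
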
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 7d722d0..fbb43c4 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -12,14 +12,10 @@ POSTHOOK: Lineage: decimal_date_test.cdate EXPRESSION [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: decimal_date_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_date_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_date_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -36,65 +32,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnInList(col 3, values [-67, -171]) -> boolean
                     predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -107,14 +65,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -131,77 +85,31 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsFalse(col 4)(children: LongColumnInList(col 3, values [-67, -171, 20]) -> 4:boolean) -> boolean
                     predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -214,14 +122,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -238,65 +142,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> boolean
                     predicate: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -309,14 +175,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -333,77 +195,31 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsFalse(col 4)(children: DecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> 4:boolean) -> boolean
                     predicate: (not (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568)) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -416,14 +232,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -440,65 +252,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnBetween(col 3, left -2, right 1) -> boolean
                     predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -511,14 +285,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
+POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -535,65 +305,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColumnNotBetween(col 3, left -610, right 608) -> boolean
                     predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdate (type: date)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: date)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -606,14 +338,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
+POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -630,65 +358,27 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnBetween(col 1, left -20, right 45.9918918919) -> boolean
                     predicate: cdecimal1 BETWEEN -20 AND 45.9918918919 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdecimal1 (type: decimal(20,10))
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1]
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: decimal(20,10))
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(20,10))
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -701,14 +391,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
+POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -725,77 +411,31 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColumnNotBetween(col 1, left -2000, right 4390.1351351351) -> boolean
                     predicate: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: []
                       Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            native: false
-                            projectedOutputColumns: [0]
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1055,16 +695,12 @@ POSTHOOK: Input: default@decimal_date_test
 6172
 PREHOOK: query: -- projections
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
 POSTHOOK: query: -- projections
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1081,27 +717,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: LongColumnInList(col 3, values [-67, -171]) -> 4:boolean
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1110,50 +731,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1166,14 +757,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1190,27 +777,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: DecimalColumnInList(col 1, values [2365.8945945946, 881.0135135135, -3367.6517567568]) -> 4:boolean
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1219,50 +791,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1275,14 +817,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1299,27 +837,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: VectorUDFAdaptor(cdate BETWEEN 1969-12-30 AND 1970-01-02) -> 4:Long
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1328,50 +851,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1384,14 +877,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1408,27 +897,12 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_date_test
                   Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
-                        selectExpressions: VectorUDFAdaptor(cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351) -> 4:Long
                     Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:long) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 4
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1437,50 +911,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
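
Note: a minimal sketch of how the detail deleted above was produced. Before
this revert, the q file used the EXPLAIN VECTORIZATION EXPRESSION form that
HIVE-11394 introduced; the SET statements below are inferred from the
enabledConditionsMet / enableConditionsMet lines in the removed output, not
copied from the committed test setup.

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    -- pre-revert syntax; after the revert only plain EXPLAIN parses
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT c0, count(1)
    FROM (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 AS c0
          FROM decimal_date_test) tab
    GROUP BY c0;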

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
index bf6c494..424a2c9 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
@@ -97,24 +97,20 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT 
+PREHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
   FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT 
+POSTHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
   FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,27 +128,12 @@ STAGE PLANS:
                 TableScan
                   alias: over1korc
                   Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Select Operator
                     expressions: i (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: avg(50), avg(50.0), avg(50)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 11:long) -> struct<count:bigint,sum:double>, VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 12:double) -> struct<count:bigint,sum:double>, VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 13:decimal(10,0)) -> struct<count:bigint,sum:decimal(20,0)>
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          keyExpressions: col 2
-                          native: false
-                          projectedOutputColumns: [0, 1, 2]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 11:long) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 12:double) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 13:decimal(10,0)) -> struct<count:bigint,sum:decimal(20,0)> output type STRUCT requires PRIMITIVE IS false
                       keys: _col0 (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
@@ -165,20 +146,7 @@ STAGE PLANS:
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
@@ -194,33 +162,16 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
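
Note: the deleted Reduce Vectorization block for Reducer 2 carried the one
substantive diagnostic in this file: avg's partial aggregate is a struct
(struct<count:bigint,sum:double,input:int>), which reduce-side vectorization
could not consume, so that reducer fell back to row mode. A sketch to
reproduce under the pre-revert patch, assuming the over1korc table created
earlier in the q file:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    -- pre-revert, Reducer 2 reported vectorized: false for the reason above
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT i,
           AVG(CAST(50 AS INT)) AS avg_int_ok,
           AVG(CAST(50 AS DOUBLE)) AS avg_double_ok,
           AVG(CAST(50 AS DECIMAL)) AS avg_decimal_ok
    FROM over1korc GROUP BY i ORDER BY i LIMIT 10;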

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
index 943a4b1..3e551bb 100644
--- a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
@@ -121,16 +121,12 @@ POSTHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_lazy_binary_columnar
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -146,23 +142,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: CAST( t AS CHAR(10) (type: char(10)), CAST( si AS CHAR(10) (type: char(10)), CAST( i AS CHAR(20) (type: char(20)), CAST( b AS CHAR(30) (type: char(30)), CAST( f AS CHAR(20) (type: char(20)), CAST( d AS CHAR(20) (type: char(20)), CAST( s AS CHAR(50) (type: char(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19]
-                        selectExpressions: CastLongToChar(col 0, maxLength 10) -> 13:Char, CastLongToChar(col 1, maxLength 10) -> 14:Char, CastLongToChar(col 2, maxLength 20) -> 15:Char, CastLongToChar(col 3, maxLength 30) -> 16:Char, VectorUDFAdaptor(CAST( f AS CHAR(20)) -> 17:char(20), VectorUDFAdaptor(CAST( d AS CHAR(20)) -> 18:char(20), CastStringGroupToChar(col 8, maxLength 50) -> 19:Char
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -170,14 +155,6 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
                           name: default.char_lazy_binary_columnar
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Move Operator
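
Note: the deleted selectExpressions line is the interesting part of this
plan: the tinyint/smallint/int/bigint-to-char casts vectorized natively
(CastLongToChar), while the float and double casts fell back to
VectorUDFAdaptor, which is why the removed Map Vectorization block flagged
usesVectorUDFAdaptor: true. A sketch of the pre-revert invocation, assuming
the tables created earlier in the q file:

    SET hive.vectorized.execution.enabled=true;
    -- float/double-to-char casts go through VectorUDFAdaptor, the rest are native
    EXPLAIN VECTORIZATION EXPRESSION
    INSERT OVERWRITE TABLE char_lazy_binary_columnar
    SELECT t, si, i, b, f, d, s FROM vectortab2korc;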

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
index 4322dc7..73272fb 100644
--- a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
@@ -1231,18 +1231,14 @@ POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_web_page_sk SIMPLE [
 POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
 PREHOOK: query: ------------------------------------------------------------------------------------------
 
-explain vectorization expression
+explain
 select count(distinct ws_order_number) from web_sales
 PREHOOK: type: QUERY
 POSTHOOK: query: ------------------------------------------------------------------------------------------
 
-explain vectorization expression
+explain
 select count(distinct ws_order_number) from web_sales
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1260,24 +1256,11 @@ STAGE PLANS:
                 TableScan
                   alias: web_sales
                   Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
                   Select Operator
                     expressions: ws_order_number (type: int)
                     outputColumnNames: ws_order_number
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [16]
                     Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 16
-                          native: false
-                          projectedOutputColumns: []
                       keys: ws_order_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -1286,88 +1269,35 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 1752000 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0) -> bigint
-                      className: VectorGroupByOperator
-                      vectorOutput: true
-                      native: false
-                      projectedOutputColumns: [0]
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: Uniform Hash IS false
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
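
Note: independent of the annotations being removed, the plan above is a good
illustration of how Hive compiles count(distinct): Reducer 2 deduplicates
ws_order_number with a keyed group-by, and Reducer 3 merges the partial
counts. A sketch, assuming the web_sales table loaded earlier in the q file:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    -- two aggregation rounds: dedup by key in Reducer 2, merge counts in Reducer 3
    EXPLAIN
    SELECT count(DISTINCT ws_order_number) FROM web_sales;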


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out b/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
index c21da5f..16603c7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
@@ -97,9 +97,9 @@ POSTHOOK: type: SHOWPARTITIONS
 POSTHOOK: Input: default@char_tbl2
 gpa=3    
 gpa=3.5  
-PREHOOK: query: explain vectorization select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
+PREHOOK: query: explain select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
+POSTHOOK: query: explain select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
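
Note: this q file used the summary-level explain vectorization form (no
EXPRESSION keyword), so the golden file only ever held the CBO summary and
the change here is confined to the query text. A sketch of the underlying
query for reference:

    EXPLAIN
    SELECT c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa
    FROM char_tbl1 c1 JOIN char_tbl2 c2 ON (c1.gpa = c2.gpa);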
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
index 25066be..b9ffa34 100644
--- a/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: explain vectorization 
+PREHOOK: query: explain 
 select count(*) from (select c.ctinyint 
 from alltypesorc c
 left outer join alltypesorc cd
@@ -7,7 +7,7 @@ left outer join alltypesorc hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization 
+POSTHOOK: query: explain 
 select count(*) from (select c.ctinyint 
 from alltypesorc c
 left outer join alltypesorc cd
@@ -16,10 +16,6 @@ left outer join alltypesorc hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -72,14 +68,6 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -96,14 +84,6 @@ STAGE PLANS:
                       Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -120,23 +100,8 @@ STAGE PLANS:
                       Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
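
Note: the deleted Map Vectorization blocks differed across vertices: Map 3
and Map 4 (scans feeding the map joins) reported allNative: true, while
Map 1 reported allNative: false, presumably because it also runs the
hash-mode group-by, which is never a native vector operator in this patch.
A sketch of the query; the ON clause of the cd join is elided by the hunk
above, so the one shown here is hypothetical:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    EXPLAIN
    SELECT count(*) FROM (
      SELECT c.ctinyint
      FROM alltypesorc c
      LEFT OUTER JOIN alltypesorc cd
        ON cd.cint = c.cint        -- hypothetical; not shown in the hunk
      LEFT OUTER JOIN alltypesorc hd
        ON hd.ctinyint = c.ctinyint
    ) t1;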

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
index fa35da7..1163d24 100644
--- a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
@@ -80,16 +80,12 @@ POSTHOOK: Output: default@tjoin2
 POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
 POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
 POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -174,16 +170,12 @@ POSTHOOK: Input: default@tjoin2
 0	10	15	NULL
 1	20	25	NULL
 2	NULL	50	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -268,16 +260,12 @@ POSTHOOK: Input: default@tjoin2
 0	10	15	NULL
 1	20	25	NULL
 2	NULL	50	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -295,16 +283,9 @@ STAGE PLANS:
                 TableScan
                   alias: tjoin1
                   Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: rnum (type: int), c1 (type: int), c2 (type: int)
                     outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
                     Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -315,11 +296,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOuterFilteredOperator
-                          native: false
-                          nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false
                       outputColumnNames: _col0, _col1, _col2, _col4
                       input vertices:
                         1 Map 2
@@ -327,16 +303,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col4 (type: char(2))
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3]
                         Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -344,50 +313,23 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tjoin2
                   Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: c1 (type: int), c2 (type: char(2))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
                     Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -408,16 +350,12 @@ POSTHOOK: Input: default@tjoin2
 0	10	15	NULL
 1	20	25	NULL
 2	NULL	50	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -435,16 +373,9 @@ STAGE PLANS:
                 TableScan
                   alias: tjoin1
                   Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: rnum (type: int), c1 (type: int), c2 (type: int)
                     outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
                     Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -455,11 +386,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOuterFilteredOperator
-                          native: false
-                          nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          nativeConditionsNotMet: hive.vectorized.execution.mapjoin.native.enabled IS false
                       outputColumnNames: _col0, _col1, _col2, _col4
                       input vertices:
                         1 Map 2
@@ -467,16 +393,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col4 (type: char(2))
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3]
                         Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -484,50 +403,23 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tjoin2
                   Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: c1 (type: int), c2 (type: char(2))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
                     Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -548,16 +440,12 @@ POSTHOOK: Input: default@tjoin2
 0	10	15	NULL
 1	20	25	NULL
 2	NULL	50	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -575,16 +463,9 @@ STAGE PLANS:
                 TableScan
                   alias: tjoin1
                   Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: rnum (type: int), c1 (type: int), c2 (type: int)
                     outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
                     Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -595,10 +476,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                       outputColumnNames: _col0, _col1, _col2, _col4
                       input vertices:
                         1 Map 2
@@ -606,16 +483,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col4 (type: char(2))
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3]
                         Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -623,50 +493,23 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tjoin2
                   Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: c1 (type: int), c2 (type: char(2))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
                     Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -687,16 +530,12 @@ POSTHOOK: Input: default@tjoin2
 0	10	15	NULL
 1	20	25	NULL
 2	NULL	50	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -714,16 +553,9 @@ STAGE PLANS:
                 TableScan
                   alias: tjoin1
                   Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: rnum (type: int), c1 (type: int), c2 (type: int)
                     outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
                     Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -734,10 +566,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                       outputColumnNames: _col0, _col1, _col2, _col4
                       input vertices:
                         1 Map 2
@@ -745,16 +573,9 @@ STAGE PLANS:
                       Select Operator
                         expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col4 (type: char(2))
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Select Vectorization:
-                            className: VectorSelectOperator
-                            native: true
-                            projectedOutputColumns: [0, 1, 2, 3]
                         Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
                           Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -762,50 +583,23 @@ STAGE PLANS:
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: tjoin2
                   Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
                   Select Operator
                     expressions: c1 (type: int), c2 (type: char(2))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
                     Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
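
Note: the repeated EXPLAINs in this file are otherwise identical, and the
annotations this revert deletes were the only thing telling them apart. Per
the removed lines: the first two ran with vectorization disabled
(enabled: false); the third ran vectorized but with
hive.vectorized.execution.mapjoin.native.enabled=false, yielding the
row-mode VectorMapJoinOuterFilteredOperator; the later ones enabled it and
got the fully native VectorMapJoinOuterLongOperator. The SET sequence below
is inferred from those annotations, not copied from the q file:

    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.mapjoin.native.enabled=true;
    -- with the mapjoin property false, the same plan used the non-native
    -- VectorMapJoinOuterFilteredOperator instead
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 AS c2j2
    FROM tjoin1 LEFT OUTER JOIN tjoin2
      ON (tjoin1.c1 = tjoin2.c1 AND tjoin1.c2 > 15);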


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
index 8e11112..735e4f4 100644
--- a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
@@ -43,110 +43,48 @@ POSTHOOK: Output: default@tbl2
 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
 ) subq1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
 ) subq1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=11 width=93)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=10 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=10 width=93)
+                    predicate:key is not null
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,b,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=10 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=10 width=93)
+                    predicate:key is not null
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
@@ -164,7 +102,7 @@ POSTHOOK: Input: default@tbl2
 #### A masked pattern was here ####
 22
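
The Merge Join Operator in the plan above is only legal because tbl1 and
tbl2 are bucketed and sorted on the join key, and the conversion itself is
driven by session settings. A hedged sketch of the kind of setup these
tests rely on (the bucket count and the exact switches are assumptions
here; they are not shown in this excerpt):

    -- Hypothetical DDL: bucketed and sorted on the join key, the
    -- prerequisite for a sort-merge join
    CREATE TABLE tbl1 (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE tbl2 (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    -- Settings that allow the optimizer to pick the conversion
    SET hive.auto.convert.sortmerge.join=true;
    SET hive.optimize.bucketmapjoin=true;
    SET hive.optimize.bucketmapjoin.sortedmerge=true;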
 PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from
 (
   select key, count(*) from 
@@ -175,7 +113,7 @@ select count(*) from
 ) subq2
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from
 (
   select key, count(*) from 
@@ -185,149 +123,46 @@ select count(*) from
   group by key
 ) subq2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          keys: _col0 (type: int)
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            key expressions: _col0 (type: int)
-                            sort order: +
-                            Map-reduce partition columns: _col0 (type: int)
-                            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: []
-                  Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    aggregations: count()
-                    Group By Vectorization:
-                        aggregators: VectorUDAFCountStar(*) -> bigint
-                        className: VectorGroupByOperator
-                        vectorOutput: true
-                        native: false
-                        projectedOutputColumns: [0]
-                    mode: hash
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 3 vectorized, llap
+      File Output Operator [FS_31]
+        Group By Operator [GBY_30] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Reducer 2 [SIMPLE_EDGE] vectorized, llap
+          SHUFFLE [RS_29]
+            Group By Operator [GBY_28] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Select Operator [SEL_27] (rows=5 width=93)
+                Group By Operator [GBY_26] (rows=5 width=93)
+                  Output:["_col0"],keys:KEY._col0
+                <-Map 1 [SIMPLE_EDGE] llap
+                  SHUFFLE [RS_11]
+                    PartitionCols:_col0
+                    Group By Operator [GBY_10] (rows=11 width=93)
+                      Output:["_col0"],keys:_col0
+                      Merge Join Operator [MERGEJOIN_24] (rows=11 width=93)
+                        Conds:SEL_2._col0=SEL_5._col0(Inner),Output:["_col0"]
+                      <-Select Operator [SEL_5] (rows=10 width=93)
+                          Output:["_col0"]
+                          Filter Operator [FIL_23] (rows=10 width=93)
+                            predicate:key is not null
+                            TableScan [TS_3] (rows=10 width=93)
+                              default@tbl2,b,Tbl:COMPLETE,Col:NONE,Output:["key"]
+                      <-Select Operator [SEL_2] (rows=10 width=93)
+                          Output:["_col0"]
+                          Filter Operator [FIL_22] (rows=10 width=93)
+                            predicate:key is not null
+                            TableScan [TS_0] (rows=10 width=93)
+                              default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from
 (
@@ -356,7 +191,7 @@ POSTHOOK: Input: default@tbl2
 6
 PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
 -- Each sub-query should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from 
@@ -375,7 +210,7 @@ on src1.key = src2.key
 PREHOOK: type: QUERY
 POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
 -- Each sub-query should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from 
@@ -392,206 +227,71 @@ join
 ) src2
 on src1.key = src2.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+Reducer 6 <- Map 5 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
-        Reducer 6 <- Map 5 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          keys: _col0 (type: int)
-                          mode: hash
-                          outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            key expressions: _col0 (type: int)
-                            sort order: +
-                            Map-reduce partition columns: _col0 (type: int)
-                            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col1 (type: bigint)
-            Execution mode: llap
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          keys: _col0 (type: int)
-                          mode: hash
-                          outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            key expressions: _col0 (type: int)
-                            sort order: +
-                            Map-reduce partition columns: _col0 (type: int)
-                            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col1 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkLongOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-        Reducer 3 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 5 Data size: 511 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 5 Data size: 511 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 5 Data size: 511 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkLongOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 3 llap
+      File Output Operator [FS_32]
+        Select Operator [SEL_31] (rows=5 width=102)
+          Output:["_col0","_col1","_col2"]
+          Merge Join Operator [MERGEJOIN_49] (rows=5 width=102)
+            Conds:RS_51._col0=RS_53._col0(Inner),Output:["_col0","_col1","_col3"]
+          <-Reducer 2 [SIMPLE_EDGE] vectorized, llap
+            SHUFFLE [RS_51]
+              PartitionCols:_col0
+              Group By Operator [GBY_50] (rows=5 width=93)
+                Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+              <-Map 1 [SIMPLE_EDGE] llap
+                SHUFFLE [RS_11]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_10] (rows=11 width=93)
+                    Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
+                    Merge Join Operator [MERGEJOIN_45] (rows=11 width=93)
+                      Conds:SEL_2._col0=SEL_5._col0(Inner),Output:["_col0"]
+                    <-Select Operator [SEL_5] (rows=10 width=93)
+                        Output:["_col0"]
+                        Filter Operator [FIL_42] (rows=10 width=93)
+                          predicate:key is not null
+                          TableScan [TS_3] (rows=10 width=93)
+                            default@tbl2,b,Tbl:COMPLETE,Col:NONE,Output:["key"]
+                    <-Select Operator [SEL_2] (rows=10 width=93)
+                        Output:["_col0"]
+                        Filter Operator [FIL_41] (rows=10 width=93)
+                          predicate:key is not null
+                          TableScan [TS_0] (rows=10 width=93)
+                            default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+          <-Reducer 6 [SIMPLE_EDGE] vectorized, llap
+            SHUFFLE [RS_53]
+              PartitionCols:_col0
+              Group By Operator [GBY_52] (rows=5 width=93)
+                Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
+              <-Map 5 [SIMPLE_EDGE] llap
+                SHUFFLE [RS_25]
+                  PartitionCols:_col0
+                  Group By Operator [GBY_24] (rows=11 width=93)
+                    Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
+                    Merge Join Operator [MERGEJOIN_47] (rows=11 width=93)
+                      Conds:SEL_16._col0=SEL_19._col0(Inner),Output:["_col0"]
+                    <-Select Operator [SEL_19] (rows=10 width=93)
+                        Output:["_col0"]
+                        Filter Operator [FIL_44] (rows=10 width=93)
+                          predicate:key is not null
+                          TableScan [TS_17] (rows=10 width=93)
+                            default@tbl2,b,Tbl:COMPLETE,Col:NONE,Output:["key"]
+                    <-Select Operator [SEL_16] (rows=10 width=93)
+                        Output:["_col0"]
+                        Filter Operator [FIL_43] (rows=10 width=93)
+                          predicate:key is not null
+                          TableScan [TS_14] (rows=10 width=93)
+                            default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
 (
@@ -639,7 +339,7 @@ POSTHOOK: Input: default@tbl2
 9	1	1
 PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
@@ -648,106 +348,44 @@ select count(*) from
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
   (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
   on subq1.key = subq2.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=3 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
@@ -770,7 +408,7 @@ POSTHOOK: Input: default@tbl2
 20
 PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join, although there is more than one level of sub-query
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -784,7 +422,7 @@ select count(*) from
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join, although there is more than one level of sub-query
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -796,99 +434,37 @@ select count(*) from
   join tbl2 b
   on subq2.key = b.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=1 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=1 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=1 width=93)
+                    predicate:((key < 8) and (key < 6))
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,b,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=1 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=1 width=93)
+                    predicate:((key < 8) and (key < 6))
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from 
   (
@@ -921,7 +497,7 @@ POSTHOOK: Input: default@tbl2
 20
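
Note how the plan above collapses the nested filters: both subquery levels
contribute to a single Filter Operator with predicate ((key < 8) and
(key < 6)). A sketch of the roughly equivalent flattened query (an
illustration, not one of the test queries):

    -- The per-level WHERE clauses merge into one conjunctive predicate,
    -- which is then evaluated once per table scan
    SELECT count(*)
    FROM tbl1 a JOIN tbl2 b ON a.key = b.key
    WHERE a.key < 8 AND a.key < 6
      AND b.key < 8 AND b.key < 6;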
 PREHOOK: query: -- Both tables are nested sub-queries, i.e. more than 1 level of sub-query.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -942,7 +518,7 @@ select count(*) from
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Both tables are nested sub-queries, i.e. more than 1 level of sub-query.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -961,99 +537,37 @@ select count(*) from
   ) subq4
   on subq2.key = subq4.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=1 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=1 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=1 width=93)
+                    predicate:((key < 8) and (key < 6))
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=1 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=1 width=93)
+                    predicate:((key < 8) and (key < 6))
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from 
   (
@@ -1099,7 +613,7 @@ POSTHOOK: Input: default@tbl1
 PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
 -- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
 -- item, but that is not part of the join key.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
     join
@@ -1109,106 +623,44 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
 -- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
 -- item, but that is not part of the join key.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
     join
   (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
   on subq1.key = subq2.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 8) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 8) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=3 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=3 width=93)
+                    predicate:(key < 8)
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=3 width=93)
+                    predicate:(key < 8)
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
 
 PREHOOK: query: select count(*) from 
   (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
@@ -1231,7 +683,7 @@ POSTHOOK: Input: default@tbl2
 20
 PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side
 -- join should be performed
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
     join
@@ -1240,172 +692,51 @@ select count(*) from
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side
 -- join should be performed
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
     join
   (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
   on subq1.key = subq2.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2)(children: LongColAddLongScalar(col 0, val 1) -> 2:long) -> boolean
-                    predicate: (key + 1) is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: (key + 1) (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
-                          selectExpressions: LongColAddLongScalar(col 0, val 1) -> 2:long
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2)(children: LongColAddLongScalar(col 0, val 1) -> 2:long) -> boolean
-                    predicate: (key + 1) is not null (type: boolean)
-                    Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: (key + 1) (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
-                          selectExpressions: LongColAddLongScalar(col 0, val 1) -> 2:long
-                      Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int)
-                  1 _col0 (type: int)
-                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 3 vectorized, llap
+      File Output Operator [FS_29]
+        Group By Operator [GBY_28] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Reducer 2 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_21] (rows=11 width=93)
+                Conds:RS_24._col0=RS_27._col0(Inner)
+              <-Map 1 [SIMPLE_EDGE] vectorized, llap
+                SHUFFLE [RS_24]
+                  PartitionCols:_col0
+                  Select Operator [SEL_23] (rows=10 width=93)
+                    Output:["_col0"]
+                    Filter Operator [FIL_22] (rows=10 width=93)
+                      predicate:(key + 1) is not null
+                      TableScan [TS_0] (rows=10 width=93)
+                        default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Map 4 [SIMPLE_EDGE] vectorized, llap
+                SHUFFLE [RS_27]
+                  PartitionCols:_col0
+                  Select Operator [SEL_26] (rows=10 width=93)
+                    Output:["_col0"]
+                    Filter Operator [FIL_25] (rows=10 width=93)
+                      predicate:(key + 1) is not null
+                      TableScan [TS_3] (rows=10 width=93)
+                        default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
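
Note: the derived join key above, (key + 1), invalidates the bucket and sort
metadata of tbl1 and tbl2, so the plan shuffles both sides (Map 1 and Map 4 feed
Reducer 2 over SIMPLE_EDGEs) rather than merge-joining in a single mapper. A
hypothetical rewrite that preserves the sort-merge join moves the arithmetic out of
the join key; since key + 1 = key' + 1 exactly when key = key', the count is the
same:

    SELECT count(*)
    FROM tbl1 a
    JOIN tbl2 b ON a.key = b.key;  -- join on the raw bucketed/sorted key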
 
 PREHOOK: query: select count(*) from 
   (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
@@ -1428,111 +759,49 @@ POSTHOOK: Input: default@tbl2
 22
 PREHOOK: query: -- One of the tables is a sub-query and the other is not.
 -- It should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join tbl2 a on subq1.key = a.key
 PREHOOK: type: QUERY
 POSTHOOK: query: -- One of the tables is a sub-query and the other is not.
 -- It should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join tbl2 a on subq1.key = a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=3 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_17] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
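
Note: the single-mapper merge join above presupposes that tbl1 and tbl2 are bucketed
and sorted on the join key with compatible bucket counts, and that sort-merge
conversion is switched on. The q-file setup is not part of this diff; a hypothetical
equivalent (bucket count assumed):

    CREATE TABLE tbl1 (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE tbl2 (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    SET hive.auto.convert.sortmerge.join=true;
    SET hive.optimize.bucketmapjoin=true;
    SET hive.optimize.bucketmapjoin.sortedmerge=true;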
 
 PREHOOK: query: select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
@@ -1551,7 +820,7 @@ POSTHOOK: Input: default@tbl2
 20
 PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
 -- It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
@@ -1563,7 +832,7 @@ select count(*) from
 PREHOOK: type: QUERY
 POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
 -- It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
@@ -1573,112 +842,43 @@ select count(*) from
   (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
   on (subq1.key = subq3.key)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 6) (type: boolean)
-                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                             Inner Join 0 to 2
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                          2 _col0 (type: int)
-                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_32]
+        Group By Operator [GBY_31] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_15]
+            Group By Operator [GBY_14] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_28] (rows=6 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner),SEL_2._col0=SEL_8._col0(Inner)
+              <-Select Operator [SEL_5] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_26] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_3] (rows=10 width=93)
+                      default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_8] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_27] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_6] (rows=10 width=93)
+                      default@tbl2,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
+              <-Select Operator [SEL_2] (rows=3 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_25] (rows=3 width=93)
+                    predicate:(key < 6)
+                    TableScan [TS_0] (rows=10 width=93)
+                      default@tbl1,a,Tbl:COMPLETE,Col:NONE,Output:["key"]
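
Note: because all three sub-queries project the same bucketed/sorted key, the
planner folds them into a single Merge Join Operator with two inner conditions
(SEL_2 = SEL_5 and SEL_2 = SEL_8) instead of cascading two joins. A trimmed form of
the query above (value columns dropped, since only key reaches the join) that
should produce the same plan shape:

    SELECT count(*)
    FROM (SELECT key FROM tbl1 WHERE key < 6) subq1
    JOIN (SELECT key FROM tbl2 WHERE key < 6) subq2 ON subq1.key = subq2.key
    JOIN (SELECT key FROM tbl2 WHERE key < 6) subq3 ON subq1.key = subq3.key;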
 
 PREHOOK: query: select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
@@ -1707,7 +907,7 @@ POSTHOOK: Input: default@tbl2
 56
 PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select subq2.key as key, subq2.value as value1, b.value as value2 from
   (
@@ -1722,7 +922,7 @@ on subq2.key = b.key) a
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select subq2.key as key, subq2.value as value1, b.value as value2 from
   (
@@ -1735,99 +935,37 @@ select count(*) from (
 join tbl2 b
 on subq2.key = b.key) a
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
-                      Merge Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: count()
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Reducer 2 vectorized, llap
+      File Output Operator [FS_22]
+        Group By Operator [GBY_21] (rows=1 width=8)
+          Output:["_col0"],aggregations:["count(VALUE._col0)"]
+        <-Map 1 [SIMPLE_EDGE] llap
+          SHUFFLE [RS_11]
+            Group By Operator [GBY_10] (rows=1 width=8)
+              Output:["_col0"],aggregations:["count()"]
+              Merge Join Operator [MERGEJOIN_19] (rows=1 width=102)
+                Conds:SEL_2._col0=SEL_5._col0(Inner)
+              <-Select Operator [SEL_5] (rows=1 width=93)
+                  Output:["_col0"]
+                  Filter Operator [FIL_18] (rows=1 width=93)
+                    pred

<TRUNCATED>

[22/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
index f3ffee8..69911f5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
@@ -132,17 +132,91 @@ POSTHOOK: query: select * from t4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t4
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization only summary
-
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
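
Note: LEFT SEMI JOIN emits each left row once when at least one right-side match
exists, and only left-side columns may be selected; the plan reflects this by
deduplicating the right side (the hash Group By on _col0 in Map 3) before the Map
Join. A semantically equivalent rewrite (sketch):

    SELECT a.key, a.value
    FROM t1 a
    WHERE a.key IN (SELECT b.key FROM t2 b)
    SORT BY a.key, a.value;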
 
 PREHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -160,15 +234,91 @@ POSTHOOK: Input: default@t2
 10	val_10
 4	val_4
 8	val_8
-PREHOOK: query: explain vectorization only summary
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -188,15 +338,91 @@ POSTHOOK: Input: default@t2
 10	val_5
 4	val_2
 8	val_4
-PREHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -208,22 +434,102 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t4
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t3
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
 #### A masked pattern was here ####
-POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col1 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 15) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col1
+                      Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col1 (type: int), _col1 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col1 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col1 (type: int)
+                          Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
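
Note: the query compares the int column key against the string literal '15', and
the plan shows the literal folded to an int (predicate: (key < 15)), i.e. Hive
applied an implicit conversion. The same join with the cast written out (sketch):

    SELECT a.value
    FROM t1 a
    LEFT SEMI JOIN t3 b ON (b.key = a.key AND b.key < CAST('15' AS INT))
    SORT BY a.value;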
+
+PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t3
@@ -239,15 +545,91 @@ val_5
 val_5
 val_8
 val_9
-PREHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value < 'val_10') and key is not null) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -262,15 +644,95 @@ POSTHOOK: Input: default@t2
 0	val_0
 0	val_0
 0	val_0
-PREHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t3
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 5) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
 PREHOOK: type: QUERY
@@ -285,19 +747,99 @@ POSTHOOK: Input: default@t3
 val_10
 val_8
 val_9
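
The plan above shows Hive's standard rewrite of a LEFT SEMI JOIN: the right side is reduced to its distinct join keys by a hash-mode Group By Operator and broadcast into the probe side's Map Join Operator, so each left row is emitted at most once no matter how many matches exist. Semantically this is the same as an IN filter; a minimal sketch against the same t1/t3 tables, assuming a Hive version with subquery-in-WHERE support (0.13+):

    -- Returns each t1 row whose key appears in t3 with key > 5;
    -- matches are never multiplied, mirroring the semi join above.
    select a.value
    from t1 a
    where a.key in (select key from t3 where key > 5)
    sort by a.value;
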
-PREHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
 PREHOOK: Input: default@t2
 #### A masked pattern was here ####
 POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
@@ -305,15 +847,91 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 #### A masked pattern was here ####
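
The result set above is empty, which follows from lexicographic string comparison: after key > 5 the surviving t2 rows carry values such as 'val_4' and 'val_5', and 'val_4' > 'val_20' because '4' > '2' at the fourth character. A quick sketch to check this, assuming a Hive version that allows SELECT without FROM (0.13+):

    -- Both comparisons return false, so no row passes value <= 'val_20'.
    select 'val_4' <= 'val_20';
    select 'val_5' <= 'val_20';
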
-PREHOOK: query: explain vectorization only summary
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -330,15 +948,91 @@ POSTHOOK: Input: default@t2
 10	val_5
 4	val_2
 8	val_4
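
Note the column pruning in Map 1: although the subquery selects key , value, only key feeds the semi join, so the Select Operator projects just _col0 and value never leaves the scan. A sketch of the equivalent query with the unused column dropped up front:

    -- Same plan shape; the optimizer prunes value either way.
    select *
    from t2 a
    left semi join (select key from t1 where key > 2) b on a.key = b.key
    sort by a.key, a.value;
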
-PREHOOK: query: explain vectorization only summary
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
 PREHOOK: type: QUERY
@@ -369,22 +1063,98 @@ POSTHOOK: Input: default@t3
 8
 8
 9
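
The /*+ mapjoin(b) */ hint pins b to the hash-table side; under Tez this appears as the BROADCAST_EDGE from Map 3 into Map 1, where the deduplicated keys of t1 are joined in memory. A sketch of the hint-free form, assuming the usual auto-conversion setting:

    -- Let the optimizer pick the broadcast side from table sizes
    -- instead of hinting it explicitly.
    set hive.auto.convert.join=true;
    select a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
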
-PREHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-#### A masked pattern was here ####
-POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 (2 * _col0) (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (2 * key) is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: (2 * _col0) (type: int)
+                          sort order: +
+                          Map-reduce partition columns: (2 * _col0) (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
@@ -393,15 +1163,115 @@ POSTHOOK: Input: default@t2
 0	val_0
 0	val_0
 8	val_8
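
Join keys need not be bare columns: here the build side groups on key but redistributes on (2 * _col0), and the probe side pre-filters rows where (2 * key) is not null. A sketch of the same join with the expression made explicit as a projected column, against the same t1/t2 tables:

    -- Equivalent formulation; inlining 2*key or projecting it first
    -- produces the same matches (keys 0 and 8 above).
    select a.*
    from t1 a
    left semi join (select 2 * key as k from t2) b on a.key = b.k
    sort by a.key, a.value;
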
-PREHOOK: query: explain vectorization only summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                           Left Semi Join 1 to 2
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0, _col1, _col5, _col6
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                        outputColumnNames: _col0, _col1, _col2, _col3
+                        Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col2 (type: int), _col3 (type: string)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -429,15 +1299,91 @@ POSTHOOK: Input: default@t3
 10	val_10	10	val_5
 4	val_4	4	val_2
 8	val_8	8	val_4
-PREHOOK: query: explain vectorization only summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int), value (type: string)
+                        1 _col0 (type: int), _col1 (type: string)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
 PREHOOK: type: QUERY
@@ -463,51 +1409,239 @@ POSTHOOK: Input: default@t3
 5	val_5
 8	val_8
 9	val_9
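
Semi-join keys can also be compound: the build side deduplicates on the full (key, value) pair before broadcast, and the probe filters both columns for NULLs. The same predicate written as a correlated EXISTS, a sketch assuming Hive 0.13+ correlated-subquery support:

    -- Rows of t3 that have an exact (key, value) match in t1.
    select *
    from t3 a
    where exists (select 1 from t1 b
                  where b.key = a.key and b.value = a.value)
    sort by a.key, a.value;
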
-PREHOOK: query: explain vectorization only summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                           Left Semi Join 0 to 2
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
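
The two semi joins against b and c collapse into a single three-way Map Join Operator with two broadcast inputs, so one pass over t3 applies both membership filters; duplicates on the left (the repeated 0s above) are preserved but never multiplied. A sketch of the equivalent staged form, which the optimizer may still merge into the same plan:

    -- Apply the two membership filters one join at a time.
    select a.key
    from (select x.key from t3 x left semi join t1 y on x.key = y.key) a
    left semi join t2 c on a.key = c.key
    sort by a.key;
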
+PREHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                         Left Semi Join 1 to 2
+                    keys:
+                      0 key (type: int)
+                      1 key (type: int)
+                      2 _col0 (type: int)
+                    outputColumnNames: _col0
+                    input vertices:
+                      1 Map 3
+                      2 Map 4
+                    Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
-#### A masked pattern was here ####
-POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
-#### A masked pattern was here ####
-0
-0
-0
-0
-0
-0
-10
-10
-10
-10
-4
-4
-8
-8
-PREHOOK: query: explain vectorization only summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
 PREHOOK: type: QUERY
@@ -547,15 +1681,105 @@ POSTHOOK: Input: default@t3
 4
 8
 8
-PREHOOK: query: explain vectorization only summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Left Semi Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
 PREHOOK: type: QUERY
@@ -598,15 +1822,105 @@ POSTHOOK: Input: default@t3
 NULL
 NULL
 NULL
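
Unlike the broadcast plans above, this query shuffles all three inputs over SIMPLE_EDGEs into a reduce-side Merge Join Operator: a full outer join must preserve unmatched rows from both sides, so neither input can be built into a broadcast hash table. The NULL rows in the output are keys the outer join keeps from t3 with no t1 match, which the semi join on b.key still admits. The same shuffle shape can be forced for the earlier queries, a sketch:

    -- Disabling map-join conversion reproduces the reduce-side
    -- Merge Join shape for joins that would otherwise broadcast.
    set hive.auto.convert.join=false;
    explain
    select a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
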
-PREHOOK: query: explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Left Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
 PREHOOK: type: QUERY
@@ -649,15 +1963,105 @@ POSTHOOK: Input: default@t3
 4
 8
 8
-PREHOOK: query: explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: false
-  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Right Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
 PREHOOK: type: QUERY
@@ -702,15 +2106,105 @@ NULL
 NULL
 NULL
 NULL
-

<TRUNCATED>

[49/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSourceMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSourceMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSourceMapping.java
index 061e396..4f5ba9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSourceMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnSourceMapping.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.exec.vector;
 
 import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOrderedMap.Mapping;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 /**
  * This class collects column information for copying a row from one VectorizedRowBatch to
@@ -36,9 +35,9 @@ public class VectorColumnSourceMapping extends VectorColumnMapping {
   }
 
   @Override
-  public void add(int sourceColumn, int outputColumn, TypeInfo typeInfo) {
+  public void add(int sourceColumn, int outputColumn, String typeName) {
     // Order on sourceColumn.
-    vectorColumnMapping.add(sourceColumn, outputColumn, typeInfo);
+    vectorColumnMapping.add(sourceColumn, outputColumn, typeName);
   }
 
   @Override
@@ -48,7 +47,7 @@ public class VectorColumnSourceMapping extends VectorColumnMapping {
     // Ordered columns are the source columns.
     sourceColumns = mapping.getOrderedColumns();
     outputColumns = mapping.getValueColumns();
-    typeInfos = mapping.getTypeInfos();
+    typeNames = mapping.getTypeNames();
 
     // Not needed anymore.
     vectorColumnMapping = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java
index 911aeb0..c8e0284 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorCopyRow.java
@@ -262,7 +262,8 @@ public class VectorCopyRow {
     for (int i = 0; i < count; i++) {
       int inputColumn = columnMapping.getInputColumns()[i];
       int outputColumn = columnMapping.getOutputColumns()[i];
-      TypeInfo typeInfo = columnMapping.getTypeInfos()[i];
+      String typeName = columnMapping.getTypeNames()[i].toLowerCase();
+      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
       Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
 
       CopyRow copyRowByValue = null;
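
A note for readers of this revert: with the TypeInfo plumbing removed, the column mapping carries only lower-cased type name strings, and VectorCopyRow rebuilds the TypeInfo by parsing that string. A minimal, self-contained sketch of that round-trip (the class name and sample type below are illustrative, not part of the patch):

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class TypeNameRoundTripSketch {
      public static void main(String[] args) {
        // A type name as it might appear in the mapping, normalized to lower case.
        String typeName = "DECIMAL(10,2)".toLowerCase();
        // Parse the string back into a full TypeInfo, as the hunk above does.
        TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
        System.out.println(typeInfo.getTypeName()); // prints: decimal(10,2)
      }
    }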

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
index bfe22b0..261246b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.VectorFilterDesc;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -51,8 +50,9 @@ public class VectorFilterOperator extends FilterOperator {
   public VectorFilterOperator(CompilationOpContext ctx,
       VectorizationContext vContext, OperatorDesc conf) throws HiveException {
     this(ctx);
+    ExprNodeDesc oldExpression = ((FilterDesc) conf).getPredicate();
+    conditionEvaluator = vContext.getVectorExpression(oldExpression, VectorExpressionDescriptor.Mode.FILTER);
     this.conf = (FilterDesc) conf;
-    conditionEvaluator = ((VectorFilterDesc) this.conf.getVectorDesc()).getPredicateExpression();
   }
 
   /** Kryo ctor. */
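
In other words, the restored constructor compiles the filter predicate eagerly from the FilterDesc instead of reading a precompiled expression off a VectorFilterDesc. A hedged sketch of that pattern in isolation (the wrapper class is illustrative, not from the patch):

    import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
    import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
    import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.ql.plan.FilterDesc;

    final class FilterPredicateSketch {
      // Compile the predicate into a FILTER-mode vector expression, mirroring
      // the two lines this revert adds to the constructor above.
      static VectorExpression compile(VectorizationContext vContext, FilterDesc desc)
          throws HiveException {
        ExprNodeDesc predicate = desc.getPredicate();
        return vContext.getVectorExpression(predicate,
            VectorExpressionDescriptor.Mode.FILTER);
      }
    }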

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
index fef7c2a..2605203 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc;
 import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
@@ -66,8 +65,6 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
   private static final Logger LOG = LoggerFactory.getLogger(
       VectorGroupByOperator.class.getName());
 
-  private VectorGroupByDesc vectorDesc;
-
   /**
    * This is the vector of aggregators. They are stateless and only implement
    * the algorithm of how to compute the aggregation. state is kept in the
@@ -759,10 +756,16 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
     this(ctx);
     GroupByDesc desc = (GroupByDesc) conf;
     this.conf = desc;
-    vectorDesc = (VectorGroupByDesc) desc.getVectorDesc();
-    keyExpressions = vectorDesc.getKeyExpressions();
-    aggregators = vectorDesc.getAggregators();
-    isVectorOutput = vectorDesc.isVectorOutput();
+    List<ExprNodeDesc> keysDesc = desc.getKeys();
+    keyExpressions = vContext.getVectorExpressions(keysDesc);
+    ArrayList<AggregationDesc> aggrDesc = desc.getAggregators();
+    aggregators = new VectorAggregateExpression[aggrDesc.size()];
+    for (int i = 0; i < aggrDesc.size(); ++i) {
+      AggregationDesc aggDesc = aggrDesc.get(i);
+      aggregators[i] = vContext.getAggregatorExpression(aggDesc);
+    }
+
+    isVectorOutput = desc.getVectorDesc().isVectorOutput();
 
     vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames(),
         /* vContextEnvironment */ vContext);
@@ -831,7 +834,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
 
     forwardCache = new Object[outputKeyLength + aggregators.length];
 
-    switch (vectorDesc.getProcessingMode()) {
+    switch (conf.getVectorDesc().getProcessingMode()) {
     case GLOBAL:
       Preconditions.checkState(outputKeyLength == 0);
       processingMode = this.new ProcessingModeGlobalAggregate();
@@ -847,7 +850,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc> implements
       break;
     default:
       throw new RuntimeException("Unsupported vector GROUP BY processing mode " +
-          vectorDesc.getProcessingMode().name());
+          conf.getVectorDesc().getProcessingMode().name());
     }
     processingMode.initialize(hconf);
   }
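
The same eager-compilation pattern returns here: key expressions and aggregators are derived from the GroupByDesc when the operator is constructed. A condensed sketch under the same assumptions (the helper class name is illustrative):

    import java.util.List;

    import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
    import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.plan.AggregationDesc;

    final class AggregatorCompileSketch {
      // Turn each AggregationDesc into its vectorized aggregate expression,
      // as the restored constructor above now does inline.
      static VectorAggregateExpression[] compile(VectorizationContext vContext,
          List<AggregationDesc> aggrDescs) throws HiveException {
        VectorAggregateExpression[] aggregators =
            new VectorAggregateExpression[aggrDescs.size()];
        for (int i = 0; i < aggrDescs.size(); i++) {
          aggregators[i] = vContext.getAggregatorExpression(aggrDescs.get(i));
        }
        return aggregators;
      }
    }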

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
index 6ea6122..f7fec8f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSelectOperator.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
-import org.apache.hadoop.hive.ql.plan.VectorSelectDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
@@ -46,11 +45,9 @@ public class VectorSelectOperator extends Operator<SelectDesc> implements
 
   private static final long serialVersionUID = 1L;
 
-  private VectorSelectDesc vectorDesc;
-
   protected VectorExpression[] vExpressions = null;
 
-  private int [] projectedOutputColumns = null;
+  private transient int [] projectedColumns = null;
 
   private transient VectorExpressionWriter [] valueWriters = null;
 
@@ -61,9 +58,13 @@ public class VectorSelectOperator extends Operator<SelectDesc> implements
       VectorizationContext vContext, OperatorDesc conf) throws HiveException {
     this(ctx);
     this.conf = (SelectDesc) conf;
-    vectorDesc = (VectorSelectDesc) this.conf.getVectorDesc();
-    vExpressions = vectorDesc.getSelectExpressions();
-    projectedOutputColumns = vectorDesc.getProjectedOutputColumns();
+    List<ExprNodeDesc> colList = this.conf.getColList();
+    vExpressions = new VectorExpression[colList.size()];
+    for (int i = 0; i < colList.size(); i++) {
+      ExprNodeDesc expr = colList.get(i);
+      VectorExpression ve = vContext.getVectorExpression(expr);
+      vExpressions[i] = ve;
+    }
 
     /**
      * Create a new vectorization context to create a new projection, but keep
@@ -72,10 +73,11 @@ public class VectorSelectOperator extends Operator<SelectDesc> implements
     vOutContext = new VectorizationContext(getName(), vContext);
 
     vOutContext.resetProjectionColumns();
-    List<String> outputColumnNames = this.conf.getOutputColumnNames();
-    for (int i=0; i < projectedOutputColumns.length; ++i) {
-      String columnName = outputColumnNames.get(i);
-      vOutContext.addProjectionColumn(columnName, projectedOutputColumns[i]);
+    for (int i=0; i < colList.size(); ++i) {
+      String columnName = this.conf.getOutputColumnNames().get(i);
+      VectorExpression ve = vExpressions[i];
+      vOutContext.addProjectionColumn(columnName,
+              ve.getOutputColumn());
     }
   }
 
@@ -108,6 +110,11 @@ public class VectorSelectOperator extends Operator<SelectDesc> implements
     List<String> outputFieldNames = conf.getOutputColumnNames();
     outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
         outputFieldNames, objectInspectors);
+
+    projectedColumns = new int [vExpressions.length];
+    for (int i = 0; i < projectedColumns.length; i++) {
+      projectedColumns[i] = vExpressions[i].getOutputColumn();
+    }
   }
 
   @Override
@@ -132,8 +139,8 @@ public class VectorSelectOperator extends Operator<SelectDesc> implements
     // Prepare output, set the projections
     int[] originalProjections = vrg.projectedColumns;
     int originalProjectionSize = vrg.projectionSize;
-    vrg.projectionSize = projectedOutputColumns.length;
-    vrg.projectedColumns = this.projectedOutputColumns;
+    vrg.projectionSize = vExpressions.length;
+    vrg.projectedColumns = this.projectedColumns;
     forward(vrg, outputObjInspector);
 
     // Revert the projected columns back, because vrg will be re-used.
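
The save/swap/restore dance at the end of that hunk matters because the same VectorizedRowBatch instance is shared down the pipeline. A small sketch of the idiom, factored out for clarity (the helper and its Runnable parameter are illustrative, not from the patch):

    import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

    final class ProjectionSwapSketch {
      // Temporarily point the batch's projection at this operator's output
      // columns, run the forwarding step, then restore the original projection
      // because the batch object is re-used after this operator returns.
      static void forwardWithProjection(VectorizedRowBatch vrg,
          int[] projectedColumns, Runnable forward) {
        int[] originalProjections = vrg.projectedColumns;
        int originalProjectionSize = vrg.projectionSize;
        vrg.projectedColumns = projectedColumns;
        vrg.projectionSize = projectedColumns.length;
        try {
          forward.run();
        } finally {
          vrg.projectedColumns = originalProjections;
          vrg.projectionSize = originalProjectionSize;
        }
      }
    }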

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 55881fb..f088941 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.*;
 import org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor;
 import org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFArgDesc;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -561,7 +560,7 @@ public class VectorizationContext {
             } else {
               throw new HiveException(
                   "Could not vectorize expression (mode = " + mode.name() + "): " + exprDesc.toString()
-                    + " because hive.vectorized.adaptor.usage.mode=chosen"
+                    + " because hive.vectorized.adaptor.usage.mode=chosen "
                     + " and the UDF wasn't one of the chosen ones");
             }
             break;
@@ -1263,35 +1262,6 @@ public class VectorizationContext {
     return "arguments: " + Arrays.toString(args) + ", argument classes: " + argClasses.toString();
   }
 
-  private static int STACK_LENGTH_LIMIT = 15;
-
-  public static String getStackTraceAsSingleLine(Throwable e) {
-    StringBuilder sb = new StringBuilder();
-    sb.append(e);
-    sb.append(" stack trace: ");
-    StackTraceElement[] stackTrace = e.getStackTrace();
-    int length = stackTrace.length;
-    boolean isTruncated = false;
-    if (length > STACK_LENGTH_LIMIT) {
-      length = STACK_LENGTH_LIMIT;
-      isTruncated = true;
-    }
-    for (int i = 0; i < length; i++) {
-      if (i > 0) {
-        sb.append(", ");
-      }
-      sb.append(stackTrace[i]);
-    }
-    if (isTruncated) {
-      sb.append(", ...");
-    }
-
-    // Attempt to cleanup stack trace elements that vary by VM.
-    String cleaned = sb.toString().replaceAll("GeneratedConstructorAccessor[0-9]*", "GeneratedConstructorAccessor<omitted>");
-
-    return cleaned;
-  }
-
   private VectorExpression instantiateExpression(Class<?> vclass, TypeInfo returnType, Object...args)
       throws HiveException {
     VectorExpression ve = null;
@@ -1303,14 +1273,14 @@ public class VectorizationContext {
         ve = (VectorExpression) ctor.newInstance();
       } catch (Exception ex) {
         throw new HiveException("Could not instantiate " + vclass.getSimpleName() + " with 0 arguments, exception: " +
-            getStackTraceAsSingleLine(ex));
+                    StringUtils.stringifyException(ex));
       }
     } else if (numParams == argsLength) {
       try {
         ve = (VectorExpression) ctor.newInstance(args);
       } catch (Exception ex) {
           throw new HiveException("Could not instantiate " + vclass.getSimpleName() + " with " + getNewInstanceArgumentString(args) + ", exception: " +
-              getStackTraceAsSingleLine(ex));
+                      StringUtils.stringifyException(ex));
       }
     } else if (numParams == argsLength + 1) {
       // Additional argument is needed, which is the outputcolumn.
@@ -1336,7 +1306,7 @@ public class VectorizationContext {
         ve.setOutputType(outType);
       } catch (Exception ex) {
           throw new HiveException("Could not instantiate " + vclass.getSimpleName() + " with arguments " + getNewInstanceArgumentString(newArgs) + ", exception: " +
-              getStackTraceAsSingleLine(ex));
+                      StringUtils.stringifyException(ex));
       }
     }
     // Add maxLength parameter to UDFs that have CHAR or VARCHAR output.

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContextRegion.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContextRegion.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContextRegion.java
index 914bb1f..a403725 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContextRegion.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContextRegion.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.hive.ql.exec.vector;
 
 /**
  * VectorizationContextRegion optional interface implemented by vectorized operators 
- * that are changing the vectorization context (region boundary operators)
+ * that are changing the vectorization context (region boundary operators)
  */
 public interface VectorizationContextRegion {
 
   VectorizationContext getOuputVectorizationContext();
 
-}
\ No newline at end of file
+}
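
For context, this interface is how region-boundary operators (such as VectorSelectOperator and VectorGroupByOperator in this patch) publish the vectorization context for their output columns. A hypothetical implementor, assuming only the two-argument VectorizationContext constructor used elsewhere in this commit (the class name is illustrative; the method name keeps Hive's existing spelling):

    import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
    import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;

    class RegionBoundarySketch implements VectorizationContextRegion {
      private final VectorizationContext vOutContext;

      RegionBoundarySketch(VectorizationContext parent) {
        // A boundary operator hands downstream operators a fresh context
        // describing its own output columns.
        vOutContext = new VectorizationContext("RegionBoundarySketch", parent);
      }

      @Override
      public VectorizationContext getOuputVectorizationContext() {
        return vOutContext;
      }
    }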

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index e546a65..3e3844e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -38,10 +38,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.IOPrepareCache;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
index b49ff39..c50af8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
@@ -518,11 +518,6 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", pattern " + pattern;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToCharViaLongToChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToCharViaLongToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToCharViaLongToChar.java
index 96c08af..57dc92b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToCharViaLongToChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToCharViaLongToChar.java
@@ -51,9 +51,4 @@ public class CastBooleanToCharViaLongToChar extends CastBooleanToStringViaLongTo
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToVarCharViaLongToVarChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToVarCharViaLongToVarChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToVarCharViaLongToVarChar.java
index a120f2e..1f7697e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToVarCharViaLongToVarChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastBooleanToVarCharViaLongToVarChar.java
@@ -51,9 +51,4 @@ public class CastBooleanToVarCharViaLongToVarChar extends CastBooleanToStringVia
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToChar.java
index 447e258..187f12b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToChar.java
@@ -51,8 +51,4 @@ public class CastDateToChar extends CastDateToString implements TruncStringOutpu
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarChar.java
index 98c1f93..5ad745c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarChar.java
@@ -51,9 +51,4 @@ public class CastDateToVarChar extends CastDateToString implements TruncStringOu
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToChar.java
index 5ab22ea..aab3e70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToChar.java
@@ -55,9 +55,4 @@ public class CastDecimalToChar extends CastDecimalToString implements TruncStrin
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java
index e1debcd..ea235d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToDecimal.java
@@ -148,11 +148,6 @@ public class CastDecimalToDecimal extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToVarChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToVarChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToVarChar.java
index b6a4f73..267b0b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToVarChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDecimalToVarChar.java
@@ -55,9 +55,4 @@ public class CastDecimalToVarChar extends CastDecimalToString implements TruncSt
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
index e38e32b..07f94f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
@@ -114,11 +114,6 @@ public class CastDoubleToTimestamp extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToChar.java
index eac45e4..27674c4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToChar.java
@@ -51,9 +51,4 @@ public class CastLongToChar extends CastLongToString implements TruncStringOutpu
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDate.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDate.java
index 86e0959..ceefd61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToDate.java
@@ -94,11 +94,6 @@ public class CastLongToDate extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
index 9f71b9a..4de95a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
@@ -111,11 +111,6 @@ public class CastLongToTimestamp extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToVarChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToVarChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToVarChar.java
index 9bc1cdb..7c3dca2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToVarChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToVarChar.java
@@ -51,9 +51,4 @@ public class CastLongToVarChar extends CastLongToString implements TruncStringOu
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastMillisecondsLongToTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastMillisecondsLongToTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastMillisecondsLongToTimestamp.java
index 4cc120a..b1c6b2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastMillisecondsLongToTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastMillisecondsLongToTimestamp.java
@@ -112,11 +112,6 @@ public class CastMillisecondsLongToTimestamp extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToChar.java
index 3469183..7c06ff5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToChar.java
@@ -52,8 +52,4 @@ public class CastStringGroupToChar extends StringUnaryUDFDirect implements Trunc
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToVarChar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToVarChar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToVarChar.java
index fd4c76a..376ce92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToVarChar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringGroupToVarChar.java
@@ -52,9 +52,4 @@ public class CastStringGroupToVarChar extends StringUnaryUDFDirect implements Tr
   public void setMaxLength(int maxLength) {
     this.maxLength = maxLength;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn + ", maxLength " + maxLength;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDate.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDate.java
index 4b176ae..e456b12 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDate.java
@@ -149,11 +149,6 @@ public class CastStringToDate extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java
index 074f9aa..504b354 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDecimal.java
@@ -154,11 +154,6 @@ public class CastStringToDecimal extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalDayTime.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalDayTime.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalDayTime.java
index e577628..c8844c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalDayTime.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalDayTime.java
@@ -148,11 +148,6 @@ public class CastStringToIntervalDayTime extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalYearMonth.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalYearMonth.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalYearMonth.java
index 21b034a..62f3dc9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalYearMonth.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToIntervalYearMonth.java
@@ -146,11 +146,6 @@ public class CastStringToIntervalYearMonth extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToBoolean.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToBoolean.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToBoolean.java
index 0e23bfb..b8a58cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToBoolean.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToBoolean.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.MathExpr;
 import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 public class CastTimestampToBoolean extends VectorExpression {
   private static final long serialVersionUID = 1L;
@@ -123,11 +125,6 @@ public class CastTimestampToBoolean extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDouble.java
index 92595d9..a955d79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToDouble.java
@@ -118,11 +118,6 @@ public class CastTimestampToDouble extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToLong.java
index 466043e..ba2e823 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToLong.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.MathExpr;
 import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 public class CastTimestampToLong extends VectorExpression {
   private static final long serialVersionUID = 1L;
@@ -118,11 +120,6 @@ public class CastTimestampToLong extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColAndCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColAndCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColAndCol.java
index 42f9b60..ff7371d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColAndCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColAndCol.java
@@ -314,11 +314,6 @@ public class ColAndCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColOrCol.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColOrCol.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColOrCol.java
index 297c372..60ed2d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColOrCol.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ColOrCol.java
@@ -317,11 +317,6 @@ public class ColOrCol extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
index 487c4b0..72749b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/ConstantVectorExpression.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
 import java.sql.Timestamp;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
@@ -269,38 +268,6 @@ public class ConstantVectorExpression extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    String value;
-    if (isNullValue) {
-      value = "null";
-    } else {
-      switch (type) {
-      case LONG:
-        value = Long.toString(longValue);
-        break;
-      case DOUBLE:
-        value = Double.toString(doubleValue);
-        break;
-      case BYTES:
-        value = new String(bytesValue, StandardCharsets.UTF_8);
-        break;
-      case DECIMAL:
-        value = decimalValue.toString();
-        break;
-      case TIMESTAMP:
-        value = timestampValue.toString();
-        break;
-      case INTERVAL_DAY_TIME:
-        value = intervalDayTimeValue.toString();
-        break;
-      default:
-        throw new RuntimeException("Unknown vector column type " + type);
-      }
-    }
-    return "val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder()).build();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateColumn.java
index e04280f..fafacce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateColumn.java
@@ -168,10 +168,6 @@ public class DateColSubtractDateColumn extends VectorExpression {
     return "timestamp";
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum1 + ", col " + colNum2;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateScalar.java
index bce24ea..a9ca93c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateColSubtractDateScalar.java
@@ -132,11 +132,6 @@ public class DateColSubtractDateScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateScalarSubtractDateColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateScalarSubtractDateColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateScalarSubtractDateColumn.java
index 62f29f1..59cf9da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateScalarSubtractDateColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DateScalarSubtractDateColumn.java
@@ -135,11 +135,6 @@ public class DateScalarSubtractDateColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java
index 9873303..0601c66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalColumnInList.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
-import java.util.Arrays;
 import java.util.HashSet;
 
 /**
@@ -153,10 +152,4 @@ public class DecimalColumnInList extends VectorExpression implements IDecimalInE
   public void setInListValues(HiveDecimal[] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalToStringUnaryUDF.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalToStringUnaryUDF.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalToStringUnaryUDF.java
index a9e1f8b..4b1182c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalToStringUnaryUDF.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalToStringUnaryUDF.java
@@ -134,11 +134,6 @@ abstract public class DecimalToStringUnaryUDF extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DoubleColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DoubleColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DoubleColumnInList.java
index db65460..dfc1aff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DoubleColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DoubleColumnInList.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
@@ -158,11 +156,6 @@ public class DoubleColumnInList extends VectorExpression implements IDoubleInExp
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", values " + Arrays.toString(inListValues);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
 
     // return null since this will be handled as a special case in VectorizationContext

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColAndScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColAndScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColAndScalar.java
index 578feb0..bef1c18 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColAndScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColAndScalar.java
@@ -73,11 +73,6 @@ public class FilterColAndScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColOrScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColOrScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColOrScalar.java
index 72f58b1..ee0ac69 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColOrScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterColOrScalar.java
@@ -73,11 +73,6 @@ public class FilterColOrScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", val " + value;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java
index 9ebc5ac..a865343 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDecimalColumnInList.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descript
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
-import java.util.Arrays;
 import java.util.HashSet;
 
 /**
@@ -172,10 +171,4 @@ public class FilterDecimalColumnInList extends VectorExpression implements IDeci
   public void setInListValues(HiveDecimal[] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDoubleColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDoubleColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDoubleColumnInList.java
index 0252236..05dcb43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDoubleColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterDoubleColumnInList.java
@@ -177,10 +177,4 @@ public class FilterDoubleColumnInList extends VectorExpression implements IDoubl
   public void setInListValues(double [] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprAndExpr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprAndExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprAndExpr.java
index 175b497..41e3b0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprAndExpr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprAndExpr.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 
@@ -53,12 +51,6 @@ public class FilterExprAndExpr extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    // The children are input.
-    return null;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
 
     // IMPORTANT NOTE: For Multi-AND, the VectorizationContext class will catch cases with 3 or

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprOrExpr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprOrExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprOrExpr.java
index 5ed1ed8..dc5139d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprOrExpr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterExprOrExpr.java
@@ -232,12 +232,6 @@ public class FilterExprOrExpr extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    // The children are input.
-    return null;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
 
     // IMPORTANT NOTE: For Multi-OR, the VectorizationContext class will catch cases with 3 or

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterLongColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterLongColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterLongColumnInList.java
index dce1b43..a40f39c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterLongColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterLongColumnInList.java
@@ -177,11 +177,4 @@ public class FilterLongColumnInList extends VectorExpression implements ILongInE
   public void setInListValues(long [] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
-
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarAndColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarAndColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarAndColumn.java
index 7092f4b..bcc8f89 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarAndColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarAndColumn.java
@@ -73,11 +73,6 @@ public class FilterScalarAndColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarOrColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarOrColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarOrColumn.java
index ab242ae..f515e60 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarOrColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterScalarOrColumn.java
@@ -73,11 +73,6 @@ public class FilterScalarOrColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "val " + value + ", col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColumnInList.java
index 200735c..e34ec75 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColumnInList.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -181,9 +179,4 @@ public class FilterStringColumnInList extends VectorExpression implements IStrin
   public void setInListValues(byte [][] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
index f65bd97..70b393c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStructColumnInList.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -177,12 +176,4 @@ public class FilterStructColumnInList extends FilterStringColumnInList implement
     }
     this.fieldVectorColumnTypes = fieldVectorColumnTypes;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "structExpressions " + Arrays.toString(structExpressions) +
-        ", fieldVectorColumnTypes " + Arrays.toString(fieldVectorColumnTypes) +
-        ", structColumnMap " + Arrays.toString(structColumnMap);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterTimestampColumnInList.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterTimestampColumnInList.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterTimestampColumnInList.java
index a7666bc..25a276a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterTimestampColumnInList.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterTimestampColumnInList.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
 import java.sql.Timestamp;
-import java.util.Arrays;
 import java.util.HashSet;
 
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
@@ -170,10 +169,4 @@ public class FilterTimestampColumnInList extends VectorExpression implements ITi
   public void setInListValues(Timestamp[] a) {
     this.inListValues = a;
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol + ", values " + Arrays.toString(inListValues);
-  }
-
 }

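Several of the *ColumnInList hunks above pair the method removal with dropping the now-unused java.util.Arrays import, because the deleted overrides rendered the IN list with Arrays.toString(inListValues). A self-contained sketch of that formatting; the column index and values are invented example data, not values from this patch:

    import java.util.Arrays;

    // Reproduces the parameter string the deleted IN-list overrides built;
    // inputCol and inListValues are made-up examples.
    public class InListParamsSketch {
      public static void main(String[] args) {
        int inputCol = 2;
        long[] inListValues = {1L, 5L, 42L};
        System.out.println("col " + inputCol + ", values " + Arrays.toString(inListValues));
        // prints: col 2, values [1, 5, 42]
      }
    }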
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToDouble.java
index 76fdeb5..cc6afa5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToDouble.java
@@ -134,11 +134,6 @@ public abstract class FuncDecimalToDouble extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java
index 9fe3010..4691fe1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToLong.java
@@ -122,11 +122,6 @@ public abstract class FuncDecimalToLong extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToTimestamp.java
index 569d7f7..561c152 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDecimalToTimestamp.java
@@ -122,11 +122,6 @@ public abstract class FuncDecimalToTimestamp extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java
index 1b3127c..0120c0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncDoubleToDecimal.java
@@ -130,10 +130,6 @@ public abstract class FuncDoubleToDecimal extends VectorExpression {
     this.inputColumn = inputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java
index b527482..b73e851 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToDecimal.java
@@ -130,10 +130,6 @@ public abstract class FuncLongToDecimal extends VectorExpression {
     this.inputColumn = inputColumn;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToString.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToString.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToString.java
index db45ed4..fa0a746 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToString.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncLongToString.java
@@ -143,10 +143,6 @@ public abstract class FuncLongToString extends VectorExpression {
     return "String";
   }
 
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputCol;
-  }
 
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
index 9eead7b..a18bb55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
@@ -120,10 +120,6 @@ public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression {
     return outputType;
   }
 
-  public String vectorExpressionParameters() {
-    return "col " + colNum + ", decimalPlaces " + decimalPlaces;
-  }
-
   @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToDecimal.java
index 5f4e83a..774551c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToDecimal.java
@@ -131,11 +131,6 @@ public abstract class FuncTimestampToDecimal extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToLong.java
index b652226..b84d9be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncTimestampToLong.java
@@ -132,11 +132,6 @@ public abstract class FuncTimestampToLong extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + inputColumn;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     VectorExpressionDescriptor.Builder b = new VectorExpressionDescriptor.Builder();
     b.setMode(VectorExpressionDescriptor.Mode.PROJECTION)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java
index 2385a40..402d0f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IdentityExpression.java
@@ -81,11 +81,6 @@ public class IdentityExpression extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + colNum;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder()).build();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprDoubleColumnDoubleColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprDoubleColumnDoubleColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprDoubleColumnDoubleColumn.java
index 514b453..f0f4f6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprDoubleColumnDoubleColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprDoubleColumnDoubleColumn.java
@@ -167,11 +167,6 @@ public class IfExprDoubleColumnDoubleColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
index 98fa29e..804923e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnColumn.java
@@ -136,11 +136,6 @@ public class IfExprIntervalDayTimeColumnColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnScalar.java
index 9dc3669..8face7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeColumnScalar.java
@@ -130,11 +130,6 @@ public class IfExprIntervalDayTimeColumnScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", val "+ arg3Scalar.toString();
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarColumn.java
index 4d4649f..40f2e08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarColumn.java
@@ -132,11 +132,6 @@ public class IfExprIntervalDayTimeScalarColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarScalar.java
index c8f3294..43676dd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprIntervalDayTimeScalarScalar.java
@@ -120,11 +120,6 @@ public class IfExprIntervalDayTimeScalarScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ arg2Scalar + ", val "+ arg3Scalar;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprLongColumnLongColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprLongColumnLongColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprLongColumnLongColumn.java
index 4c6015e..06ba8f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprLongColumnLongColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprLongColumnLongColumn.java
@@ -166,11 +166,6 @@ public class IfExprLongColumnLongColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringGroupColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringGroupColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringGroupColumn.java
index c8367c6..ca11a55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringGroupColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringGroupColumn.java
@@ -177,11 +177,6 @@ public class IfExprStringGroupColumnStringGroupColumn extends VectorExpression {
  }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringScalar.java
index 8b18ae0..4e09448 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringGroupColumnStringScalar.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -166,11 +164,6 @@ public class IfExprStringGroupColumnStringScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", val "+ new String(arg3Scalar, StandardCharsets.UTF_8);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringGroupColumn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringGroupColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringGroupColumn.java
index 3a0c035..79ed71e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringGroupColumn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringGroupColumn.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -166,11 +164,6 @@ public class IfExprStringScalarStringGroupColumn extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ new String(arg2Scalar, StandardCharsets.UTF_8) + ", col "+ arg3Column;
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringScalar.java
index 4a51693..2a35970 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprStringScalarStringScalar.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
-import java.nio.charset.StandardCharsets;
-
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
@@ -135,11 +133,6 @@ public class IfExprStringScalarStringScalar extends VectorExpression {
   }
 
   @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", val "+ new String(arg2Scalar, StandardCharsets.UTF_8) + ", val "+ new String(arg3Scalar, StandardCharsets.UTF_8);
-  }
-
-  @Override
   public VectorExpressionDescriptor.Descriptor getDescriptor() {
     return (new VectorExpressionDescriptor.Builder())
         .setMode(

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnColumnBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnColumnBase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnColumnBase.java
index 8219b3c..8441863 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnColumnBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnColumnBase.java
@@ -133,9 +133,4 @@ public abstract class IfExprTimestampColumnColumnBase extends VectorExpression {
   public String getOutputType() {
     return "long";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", col "+ arg3Column;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnScalarBase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnScalarBase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnScalarBase.java
index eb0c1c0..6b87ff2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnScalarBase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/IfExprTimestampColumnScalarBase.java
@@ -130,10 +130,4 @@ public abstract class IfExprTimestampColumnScalarBase extends VectorExpression {
   public String getOutputType() {
     return "timestamp";
   }
-
-  @Override
-  public String vectorExpressionParameters() {
-    return "col " + arg1Column + ", col "+ arg2Column + ", val "+ arg3Scalar;
-  }
-
 }
\ No newline at end of file


[39/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
index 1511298..0a01b8c 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
@@ -149,55 +149,25 @@ POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).insert_num SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).s1 SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:s1, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct1_c_txt.insert_num	complex_struct1_c_txt.s1	complex_struct1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_struct1
-                  Statistics: Num rows: 6 Data size: 4734 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), s1 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s1] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=6 width=789)
+            default@part_change_various_various_struct1,part_change_various_various_struct1,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","s1","b"]
 
 PREHOOK: query: select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
@@ -443,55 +413,25 @@ POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).b SIMPLE [
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).insert_num SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).s2 SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:s2, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct2_d_txt.insert_num	complex_struct2_d_txt.b	complex_struct2_d_txt.s2
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_various_various_struct2
-                  Statistics: Num rows: 8 Data size: 4912 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s2 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s2] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=8 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=8 width=614)
+            default@part_add_various_various_struct2,part_add_various_various_struct2,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s2"]
 
 PREHOOK: query: select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
@@ -661,55 +601,25 @@ POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).insert_num SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).s3 SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:s3, type:struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>, comment:null), ]
 complex_struct4_c_txt.insert_num	complex_struct4_c_txt.b	complex_struct4_c_txt.s3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_to_various_various_struct4
-                  Statistics: Num rows: 4 Data size: 1172 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s3 (type: struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary> of Column[s3] not supported
-                vectorized: false
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=4 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=4 width=293)
+            default@part_add_to_various_various_struct4,part_add_to_various_various_struct4,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s3"]
 
 PREHOOK: query: select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
index c89be06..d240f82 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
@@ -278,73 +278,25 @@ POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	_c54
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_boolean_to_bigint
-                  Statistics: Num rows: 10 Data size: 11688 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 (type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c47 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 55, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 55
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    dataColumns: insert_num:int, c1:boolean, c2:boolean, c3:boolean, c4:boolean, c5:boolean, c6:boolean, c7:boolean, c8:boolean, c9:boolean, c10:tinyint, c11:tinyint, c12:tinyint, c13:tinyint, c14:tinyint, c15:tinyint, c16:tinyint, c17:tinyint, c18:tinyint, c19:tinyint, c20:tinyint, c21:smallint, c22:smallint, c23:smallint, c24:smallint, c25:smallint, c26:smallint, c27:smallint, c28:smallint, c29:smallint, c30:smallint, c31:smallint, c32:int, c33:int, c34:int, c35:int, c36:int, c37:int, c38:int, c39:int, c40:int, c41:int, c42:int, c43:bigint, c44:bigint, c45:bigint, c46:bigint, c47:bigint, c48:bigint, c49:bigint, c50:bigint, c51:bigint, c52:bigint, c53:bigint, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=10 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
+          TableScan [TS_0] (rows=10 width=1168)
+            default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -541,73 +493,25 @@ POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	_c34
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_decimal_to_double
-                  Statistics: Num rows: 6 Data size: 8295 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 (type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 (type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 (type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 (type: double), c33 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 35, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 35
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    dataColumns: insert_num:int, c1:decimal(38,18), c2:decimal(38,18), c3:decimal(38,18), c4:decimal(38,18), c5:decimal(38,18), c6:decimal(38,18), c7:decimal(38,18), c8:decimal(38,18), c9:decimal(38,18), c10:decimal(38,18), c11:decimal(38,18), c12:float, c13:float, c14:float, c15:float, c16:float, c17:float, c18:float, c19:float, c20:float, c21:float, c22:float, c23:double, c24:double, c25:double, c26:double, c27:double, c28:double, c29:double, c30:double, c31:double, c32:double, c33:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
+          TableScan [TS_0] (rows=6 width=1382)
+            default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -720,73 +624,25 @@ POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	_c13
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_timestamp
-                  Statistics: Num rows: 6 Data size: 2965 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: timestamp), c12 (type: timestamp), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 14, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 14
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    dataColumns: insert_num:int, c1:timestamp, c2:timestamp, c3:timestamp, c4:timestamp, c5:timestamp, c6:timestamp, c7:timestamp, c8:timestamp, c9:timestamp, c10:timestamp, c11:timestamp, c12:timestamp, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
+          TableScan [TS_0] (rows=6 width=494)
+            default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -883,73 +739,25 @@ POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	date1	date1	date1	date1	_c5
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_date
-                  Statistics: Num rows: 6 Data size: 2444 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 6, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 6
-                    includeColumns: [0, 1, 2, 3, 4, 5]
-                    dataColumns: insert_num:int, c1:date, c2:date, c3:date, c4:date, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+          TableScan [TS_0] (rows=6 width=407)
+            default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
@@ -1127,73 +935,25 @@ POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).insert_num SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 same_type1_c_txt.insert_num	same_type1_c_txt.c1	same_type1_c_txt.c2	same_type1_c_txt.c3	same_type1_c_txt.c4	same_type1_c_txt.c5	same_type1_c_txt.c6	same_type1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_same_type_different_params
-                  Statistics: Num rows: 13 Data size: 8736 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 1, 2, 3, 4, 5, 6, 7]
-                    Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 8
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                    dataColumns: insert_num:int, c1:char(8), c2:char(32), c3:varchar(15), c4:varchar(18), c5:decimal(10,2), c6:decimal(25,15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=13 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+          TableScan [TS_0] (rows=13 width=672)
+            default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
index 5ef34e5..5e99743 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
@@ -83,72 +83,25 @@ POSTHOOK: Lineage: table_add_int_permute_select.b SIMPLE [(values__tmp__table__1
 POSTHOOK: Lineage: table_add_int_permute_select.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_permute_select.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_permute_select
-                  Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=99)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=99)
+            default@table_add_int_permute_select,table_add_int_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_permute_select
@@ -255,72 +208,25 @@ POSTHOOK: Lineage: table_add_int_string_permute_select.c EXPRESSION [(values__tm
 POSTHOOK: Lineage: table_add_int_string_permute_select.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_string_permute_select.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_string_permute_select
-                  Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=99)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=99)
+            default@table_add_int_string_permute_select,table_add_int_string_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_string_permute_select
@@ -497,72 +403,25 @@ POSTHOOK: Lineage: table_change_string_group_double.c2 EXPRESSION [(values__tmp_
 POSTHOOK: Lineage: table_change_string_group_double.c3 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_string_group_double.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 2110 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 2110 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 2110 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=422)
+          Output:["_col0","_col1","_col2","_col3","_col4"]
+          TableScan [TS_0] (rows=5 width=422)
+            default@table_change_string_group_double,table_change_string_group_double,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
@@ -832,72 +691,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_gro
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=164)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21"]
+          TableScan [TS_0] (rows=5 width=164)
+            default@table_change_numeric_group_string_group_multi_ints_string_group,table_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1052,72 +864,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=588)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
+          TableScan [TS_0] (rows=5 width=588)
+            default@table_change_numeric_group_string_group_floating_string_group,table_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
index e7d1963..d8003ba 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
@@ -39,7 +39,7 @@ POSTHOOK: Lineage: vector_interval_1.str1 EXPRESSION []
 POSTHOOK: Lineage: vector_interval_1.str2 EXPRESSION []
 POSTHOOK: Lineage: vector_interval_1.ts EXPRESSION []
 PREHOOK: query: -- constants/cast from string
-explain vectorization expression
+explain
 select
   str1,
   interval '1-2' year to month, interval_year_month(str1),
@@ -47,17 +47,13 @@ select
 from vector_interval_1 order by str1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- constants/cast from string
-explain vectorization expression
+explain
 select
   str1,
   interval '1-2' year to month, interval_year_month(str1),
   interval '1 2:3:4' day to second, interval_day_time(str2)
 from vector_interval_1 order by str1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -75,62 +71,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: str1 (type: string), CAST( str1 AS INTERVAL YEAR TO MONTH) (type: interval_year_month), CAST( str2 AS INTERVAL DAY TO SECOND) (type: interval_day_time)
                     outputColumnNames: _col0, _col2, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 4, 5]
-                        selectExpressions: CastStringToIntervalYearMonth(col 2) -> 4:interval_year_month, CastStringToIntervalDayTime(col 3) -> 5:interval_day_time
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_year_month), _col4 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), 1-2 (type: interval_year_month), VALUE._col0 (type: interval_year_month), 1 02:03:04.000000000 (type: interval_day_time), VALUE._col1 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 3, 1, 4, 2]
-                    selectExpressions: ConstantVectorExpression(val 14) -> 3:long, ConstantVectorExpression(val 1 02:03:04.000000000) -> 4:interval_day_time
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -162,7 +122,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	1-2	NULL	1 02:03:04.000000000	NULL
 1-2	1-2	1-2	1 02:03:04.000000000	1 02:03:04.000000000
 PREHOOK: query: -- interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   interval '1-2' year to month + interval '1-2' year to month,
@@ -174,7 +134,7 @@ select
 from vector_interval_1 order by dt
 PREHOOK: type: QUERY
 POSTHOOK: query: -- interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   interval '1-2' year to month + interval '1-2' year to month,
@@ -185,10 +145,6 @@ select
   interval '1-2' year to month - interval_year_month(str1)
 from vector_interval_1 order by dt
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -206,62 +162,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
                     outputColumnNames: _col0, _col2, _col3, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 6, 5, 8, 7]
-                        selectExpressions: IntervalYearMonthColAddIntervalYearMonthColumn(col 4, col 5)(children: CastStringToIntervalYearMonth(col 2) -> 4:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 6:long, IntervalYearMonthScalarAddIntervalYearMonthColumn(val 14, col 4)(children: CastStringToIntervalYearMonth(col 2) -> 4:interval_year_month) -> 5:long, IntervalYearMonthColSubtractIntervalYearMonthColumn(col 4, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 4:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 8:long, IntervalYearMonthScalarSubtractIntervalYearMonthColumn(val 14, col 4)(children: CastStringToIntervalYearMonth(col 2) -> 4:interval_year_month) -> 7:long
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), 2-4 (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), 0-0 (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 5, 1, 2, 6, 3, 4]
-                    selectExpressions: ConstantVectorExpression(val 28) -> 5:long, ConstantVectorExpression(val 0) -> 6:long
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -300,7 +220,7 @@ POSTHOOK: Input: default@vector_interval_1
 #### A masked pattern was here ####
 NULL	2-4	NULL	NULL	0-0	NULL	NULL
 2001-01-01	2-4	2-4	2-4	0-0	0-0	0-0
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   dt,
   interval '1 2:3:4' day to second + interval '1 2:3:4' day to second,
@@ -311,7 +231,7 @@ select
   interval '1 2:3:4' day to second - interval_day_time(str2)
 from vector_interval_1 order by dt
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   dt,
   interval '1 2:3:4' day to second + interval '1 2:3:4' day to second,
@@ -322,10 +242,6 @@ select
   interval '1 2:3:4' day to second - interval_day_time(str2)
 from vector_interval_1 order by dt
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -343,62 +259,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: dt (type: date), (CAST( str2 AS INTERVAL DAY TO SECOND) + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (CAST( str2 AS INTERVAL DAY TO SECOND) - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time)
                     outputColumnNames: _col0, _col2, _col3, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 6, 5, 8, 7]
-                        selectExpressions: IntervalDayTimeColAddIntervalDayTimeColumn(col 4, col 5)(children: CastStringToIntervalDayTime(col 3) -> 4:interval_day_time, CastStringToIntervalDayTime(col 3) -> 5:interval_day_time) -> 6:interval_day_time, IntervalDayTimeScalarAddIntervalDayTimeColumn(val 1 02:03:04.000000000, col 4)(children: CastStringToIntervalDayTime(col 3) -> 4:interval_day_time) -> 5:timestamp, IntervalDayTimeColSubtractIntervalDayTimeColumn(col 4, col 7)(children: CastStringToIntervalDayTime(col 3) -> 4:interval_day_time, CastStringToIntervalDayTime(col 3) -> 7:interval_day_time) -> 8:interval_day_time, IntervalDayTimeScalarSubtractIntervalDayTimeColumn(val 1 02:03:04.000000000, col 4)(children: CastStringToIntervalDayTime(col 3) -> 4:interval_day_time) -> 7:timestamp
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), 2 04:06:08.000000000 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), 0 00:00:00.000000000 (type: interval_day_time), VALUE._col4 (type: interval_day_time), VALUE._col5 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 5, 1, 2, 6, 3, 4]
-                    selectExpressions: ConstantVectorExpression(val 2 04:06:08.000000000) -> 5:interval_day_time, ConstantVectorExpression(val 0 00:00:00.000000000) -> 6:interval_day_time
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -438,7 +318,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	2 04:06:08.000000000	NULL	NULL	0 00:00:00.000000000	NULL	NULL
 2001-01-01	2 04:06:08.000000000	2 04:06:08.000000000	2 04:06:08.000000000	0 00:00:00.000000000	0 00:00:00.000000000	0 00:00:00.000000000
 PREHOOK: query: -- date-interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt + interval '1-2' year to month,
@@ -456,7 +336,7 @@ select
 from vector_interval_1 order by dt
 PREHOOK: type: QUERY
 POSTHOOK: query: -- date-interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt + interval '1-2' year to month,
@@ -473,10 +353,6 @@ select
   dt - interval_day_time(str2)
 from vector_interval_1 order by dt
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -494,61 +370,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: dt (type: date), (dt + 1-2) (type: date), (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (1-2 + dt) (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + dt) (type: date), (dt - 1-2) (type: date), (dt - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (dt + 1 02:03:04.000000000) (type: timestamp), (dt + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + dt) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + dt) (type: timestamp), (dt - 1 02:03:04.000000000) (type: timestamp), (dt - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 4, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17]
-                        selectExpressions: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 4:long, DateColAddIntervalYearMonthColumn(col 1, col 5)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 6:long, IntervalYearMonthScalarAddDateColumn(val 1-2, col 1) -> 7:long, IntervalYearMonthColAddDateColumn(col 5, col 1)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 8:long, DateColSubtractIntervalYearMonthScalar(col 1, val 1-2) -> 9:long, DateColSubtractIntervalYearMonthColumn(col 1, col 5)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 10:long, DateColAddIntervalDayTimeScalar(col 1, val 1 02:03:04.000000000) -> 11:timestamp, DateColAddIntervalDayTimeColumn(col 1, col 12)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 13:timestamp, IntervalDayTimeScalarAddDateColumn(val 1 02:03:04.000000000, col 1) -> 14:timestamp, IntervalDayTimeColAddDateColumn(col 12, col 1)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 15:interval_day_time, DateColSubtractIntervalDayTimeScalar(col 1, val 1 02:03:04.000000000) -> 16:timestamp, DateColSubtractIntervalDayTimeColumn(col 1, col 12)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 17:timestamp
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: date), VALUE._col4 (type: date), VALUE._col5 (type: date), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -600,7 +441,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 2001-01-01	2002-03-01	2002-03-01	2002-03-01	2002-03-01	1999-11-01	1999-11-01	2001-01-02 02:03:04	2001-01-02 02:03:04	2001-01-02 02:03:04	2001-01-02 02:03:04	2000-12-30 21:56:56	2000-12-30 21:56:56
 PREHOOK: query: -- timestamp-interval arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts + interval '1-2' year to month,
@@ -618,7 +459,7 @@ select
 from vector_interval_1 order by ts
 PREHOOK: type: QUERY
 POSTHOOK: query: -- timestamp-interval arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts + interval '1-2' year to month,
@@ -635,10 +476,6 @@ select
   ts - interval_day_time(str2)
 from vector_interval_1 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -656,61 +493,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: ts (type: timestamp), (ts + 1-2) (type: timestamp), (ts + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (1-2 + ts) (type: timestamp), (CAST( str1 AS INTERVAL YEAR TO MONTH) + ts) (type: timestamp), (ts - 1-2) (type: timestamp), (ts - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (ts + 1 02:03:04.000000000) (type: timestamp), (ts + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + ts) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + ts) (type: timestamp), (ts - 1 02:03:04.000000000) (type: timestamp), (ts - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17]
-                        selectExpressions: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 4:timestamp, TimestampColAddIntervalYearMonthColumn(col 0, col 5)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 6:timestamp, IntervalYearMonthScalarAddTimestampColumn(val 1-2, col 0) -> 7:timestamp, IntervalYearMonthColAddTimestampColumn(col 5, col 0)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 8:timestamp, TimestampColSubtractIntervalYearMonthScalar(col 0, val 1-2) -> 9:timestamp, TimestampColSubtractIntervalYearMonthColumn(col 0, col 5)(children: CastStringToIntervalYearMonth(col 2) -> 5:interval_year_month) -> 10:timestamp, TimestampColAddIntervalDayTimeScalar(col 0, val 1 02:03:04.000000000) -> 11:timestamp, TimestampColAddIntervalDayTimeColumn(col 0, col 12)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 13:timestamp, IntervalDayTimeScalarAddTimestampColumn(val 1 02:03:04.000000000, col 0) -> 14:timestamp, IntervalDayTimeColAddTimestampColumn(col 12, col 0)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 15:timestamp, TimestampColSubtractIntervalDayTimeScalar(col 0, val 1 02:03:04.000000000) -> 16:timestamp, TimestampColSubtractIntervalDayTimeColumn(col 0, col 12)(children: CastStringToIntervalDayTime(col 3) -> 12:interval_day_time) -> 17:timestamp
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -762,7 +564,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 2001-01-01 01:02:03	2002-03-01 01:02:03	2002-03-01 01:02:03	2002-03-01 01:02:03	2002-03-01 01:02:03	1999-11-01 01:02:03	1999-11-01 01:02:03	2001-01-02 03:05:07	2001-01-02 03:05:07	2001-01-02 03:05:07	2001-01-02 03:05:07	2000-12-30 22:58:59	2000-12-30 22:58:59
 PREHOOK: query: -- timestamp-timestamp arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts - ts,
@@ -771,7 +573,7 @@ select
 from vector_interval_1 order by ts
 PREHOOK: type: QUERY
 POSTHOOK: query: -- timestamp-timestamp arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts - ts,
@@ -779,10 +581,6 @@ select
   ts - timestamp '2001-01-01 01:02:03'
 from vector_interval_1 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -800,61 +598,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: ts (type: timestamp), (ts - ts) (type: interval_day_time), (2001-01-01 01:02:03.0 - ts) (type: interval_day_time), (ts - 2001-01-01 01:02:03.0) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 5, 6]
-                        selectExpressions: TimestampColSubtractTimestampColumn(col 0, col 0) -> 4:interval_day_time, TimestampScalarSubtractTimestampColumn(val 2001-01-01 01:02:03.0, col 0) -> 5:timestamp, TimestampColSubtractTimestampScalar(col 0, val 2001-01-01 01:02:03.0) -> 6:interval_day_time
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -888,7 +651,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	NULL	NULL	NULL
 2001-01-01 01:02:03	0 00:00:00.000000000	0 00:00:00.000000000	0 00:00:00.000000000
 PREHOOK: query: -- date-date arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt - dt,
@@ -897,7 +660,7 @@ select
 from vector_interval_1 order by dt
 PREHOOK: type: QUERY
 POSTHOOK: query: -- date-date arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt - dt,
@@ -905,10 +668,6 @@ select
   dt - date '2001-01-01'
 from vector_interval_1 order by dt
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -926,61 +685,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: dt (type: date), (dt - dt) (type: interval_day_time), (2001-01-01 - dt) (type: interval_day_time), (dt - 2001-01-01) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 4, 5, 6]
-                        selectExpressions: DateColSubtractDateColumn(col 1, col 1) -> 4:timestamp, DateScalarSubtractDateColumn(val 2001-01-01 00:00:00.0, col 1) -> 5:timestamp, DateColSubtractDateScalar(col 1, val 2001-01-01 00:00:00.0) -> 6:timestamp
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1014,7 +738,7 @@ POSTHOOK: Input: default@vector_interval_1
 NULL	NULL	NULL	NULL
 2001-01-01	0 00:00:00.000000000	0 00:00:00.000000000	0 00:00:00.000000000
 PREHOOK: query: -- date-timestamp arithmetic
-explain vectorization expression
+explain
 select
   dt,
   ts - dt,
@@ -1026,7 +750,7 @@ select
 from vector_interval_1 order by dt
 PREHOOK: type: QUERY
 POSTHOOK: query: -- date-timestamp arithmetic
-explain vectorization expression
+explain
 select
   dt,
   ts - dt,
@@ -1037,10 +761,6 @@ select
   date '2001-01-01' - ts
 from vector_interval_1 order by dt
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1058,61 +778,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Select Operator
                     expressions: dt (type: date), (ts - dt) (type: interval_day_time), (2001-01-01 01:02:03.0 - dt) (type: interval_day_time), (ts - 2001-01-01) (type: interval_day_time), (dt - ts) (type: interval_day_time), (dt - 2001-01-01 01:02:03.0) (type: interval_day_time), (2001-01-01 - ts) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 4, 5, 6, 7, 8, 9]
-                        selectExpressions: TimestampColSubtractDateColumn(col 0, col 1) -> 4:interval_day_time, TimestampScalarSubtractDateColumn(val 2001-01-01 01:02:03.0, col 1) -> 5:interval_day_time, TimestampColSubtractDateScalar(col 0, val 2001-01-01 00:00:00.0) -> 6:interval_day_time, DateColSubtractTimestampColumn(col 1, col 0) -> 7:interval_day_time, DateColSubtractTimestampScalar(col 1, val 2001-01-01 01:02:03.0) -> 8:interval_day_time, DateScalarSubtractTimestampColumn(val 2001-01-01 00:00:00.0, col 0) -> 9:interval_day_time
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time), VALUE._col4 (type: interval_day_time), VALUE._col5 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat


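For readers skimming these hunks: every change in this series replaces the extended EXPLAIN syntax from HIVE-11394 with a plain EXPLAIN. A minimal sketch of the statement being removed, assuming the HIVE-11394 grammar (an optional level keyword after VECTORIZATION, one of SUMMARY, OPERATOR, EXPRESSION, or DETAIL, plus an optional ONLY modifier that suppresses the non-vectorization parts of the plan), applied to one of the interval queries above:

  SET hive.vectorized.execution.enabled=true;

  -- EXPRESSION prints per-operator vectorization status plus the
  -- vector expressions chosen for each projected column
  EXPLAIN VECTORIZATION EXPRESSION
  SELECT dt,
         dt + INTERVAL '1-2' YEAR TO MONTH,
         dt - INTERVAL '1 2:3:4' DAY TO SECOND
  FROM vector_interval_1
  ORDER BY dt;

The PLAN VECTORIZATION, Map Vectorization, Reduce Vectorization, and per-operator Vectorization blocks deleted in the hunks above are the output this clause produced.
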
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part.q
index 11df12e..8ed041b 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -39,7 +39,7 @@ alter table part_add_int_permute_select add columns(c int);
 
 insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -62,7 +62,7 @@ alter table part_add_int_string_permute_select add columns(c int, d string);
 
 insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 
-explain vectorization detail
+explain
 select insert_num,part,a,b from part_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -94,7 +94,7 @@ alter table part_change_string_group_double replace columns (insert_num int, c1
 
 insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
 
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
@@ -117,7 +117,7 @@ alter table part_change_date_group_string_group_date_timestamp replace columns(i
 
 insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
@@ -165,7 +165,7 @@ insert into table part_change_numeric_group_string_group_multi_ints_string_group
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
@@ -208,7 +208,7 @@ insert into table part_change_numeric_group_string_group_floating_string_group p
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
@@ -250,7 +250,7 @@ insert into table part_change_string_group_string_group_string partition(part=1)
           'filler', 'filler', 'filler',
           'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
@@ -300,7 +300,7 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
             1234.5678, 9876.543, 789.321,
            'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
@@ -331,7 +331,7 @@ alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace c
 
 insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
 
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
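
The schema_evol_text_vecrow_* tests in this file all follow one verification pattern; a condensed sketch, with the table, column, and value literals taken from the part_add_int_permute_select hunks above:

  -- 1. widen an existing partitioned table's schema
  alter table part_add_int_permute_select add columns(c int);

  -- 2. write a row that uses the new schema
  insert into table part_add_int_permute_select partition(part=1)
  VALUES (2, 2222, 'new', 3333);

  -- 3. read back through the pre-evolution columns; EXPLAIN VECTORIZATION
  --    DETAIL (the form this revert removes) showed how the vectorized
  --    row reader maps the evolved schema onto the old column layout
  explain vectorization detail
  select insert_num,part,a,b from part_add_int_permute_select;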

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
index dfd55d9..b9d035e 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -54,7 +54,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt'
 
 insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,s1,b from part_change_various_various_struct1;
 
 select insert_num,part,s1,b from part_change_various_various_struct1;
@@ -114,7 +114,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt'
 
 insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s2 from part_add_various_various_struct2;
 
 select insert_num,part,b,s2 from part_add_various_various_struct2;
@@ -158,7 +158,7 @@ load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt'
 
 insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;
 
 select insert_num,part,b,s3 from part_add_to_various_various_struct4;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q
index d71c6b8..f5b0485 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.mapred.mode=nonstrict;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
@@ -74,7 +74,7 @@ insert into table part_change_various_various_boolean_to_bigint partition(part=1
              bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, 
               'new' FROM schema_evolution_data;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
@@ -116,7 +116,7 @@ insert into table part_change_various_various_decimal_to_double partition(part=1
              double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1,
              'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
@@ -140,7 +140,7 @@ alter table part_change_various_various_timestamp replace columns (insert_num in
 
 insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
@@ -161,7 +161,7 @@ alter table part_change_various_various_date replace columns (insert_num int, c1
 
 insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
 
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
@@ -200,7 +200,7 @@ load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' over
 
 insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt;
 
-explain vectorization detail
+explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
 
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
index d4209a5..c8d5458 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
@@ -1,4 +1,4 @@
-set hive.explain.user=false;
+set hive.explain.user=true;
 set hive.cli.print.header=true;
 SET hive.exec.schema.evolution=true;
 SET hive.vectorized.use.vectorized.input.format=false;
@@ -38,7 +38,7 @@ alter table table_add_int_permute_select add columns(c int);
 
 insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -61,7 +61,7 @@ alter table table_add_int_string_permute_select add columns(c int, d string);
 
 insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
 
-explain vectorization detail
+explain
 select insert_num,a,b from table_add_int_string_permute_select;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
@@ -93,7 +93,7 @@ alter table table_change_string_group_double replace columns (insert_num int, c1
 
 insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
 
 select insert_num,c1,c2,c3,b from table_change_string_group_double;
@@ -160,7 +160,7 @@ insert into table table_change_numeric_group_string_group_multi_ints_string_grou
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
@@ -203,7 +203,7 @@ insert into table table_change_numeric_group_string_group_floating_string_group
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-explain vectorization detail
+explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
 
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
index 449bea2..cef4e4c 100644
--- a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
+++ b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
@@ -32,7 +32,7 @@ insert into table count_case_groupby values ('key1', true),('key2', false),('key
 
 set hive.vectorized.adaptor.usage.mode=none;
 
-explain vectorization expression
+explain
 select
   c2 regexp 'val',
   c4 regexp 'val',
@@ -45,7 +45,7 @@ select
   (c2 regexp 'val') = (c4 regexp 'val')
 from varchar_udf_1 limit 1;
 
-explain vectorization expression
+explain
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
@@ -58,7 +58,7 @@ select
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
 from varchar_udf_1 limit 1;
 
-explain vectorization expression
+explain
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
@@ -74,7 +74,7 @@ from varchar_udf_1 limit 1;
 
 set hive.vectorized.adaptor.usage.mode=chosen;
 
-explain vectorization expression
+explain
 select
   c2 regexp 'val',
   c4 regexp 'val',
@@ -87,7 +87,7 @@ select
   (c2 regexp 'val') = (c4 regexp 'val')
 from varchar_udf_1 limit 1;
 
-explain vectorization expression
+explain
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
@@ -100,7 +100,7 @@ select
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
 from varchar_udf_1 limit 1;
 
-explain vectorization expression
+explain
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
@@ -116,11 +116,11 @@ from varchar_udf_1 limit 1;
 
 set hive.vectorized.adaptor.usage.mode=none;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT POWER(key, 2) FROM DECIMAL_UDF;
+EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF;
 
 SELECT POWER(key, 2) FROM DECIMAL_UDF;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   exp(key), ln(key),
   log(key), log(key, key), log(key, value), log(value, key),
@@ -135,11 +135,11 @@ FROM DECIMAL_UDF WHERE key = 10;
 
 set hive.vectorized.adaptor.usage.mode=chosen;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT POWER(key, 2) FROM DECIMAL_UDF;
+EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF;
 
 SELECT POWER(key, 2) FROM DECIMAL_UDF;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   exp(key), ln(key),
   log(key), log(key, key), log(key, value), log(value, key),
@@ -155,14 +155,14 @@ FROM DECIMAL_UDF WHERE key = 10;
 
 set hive.vectorized.adaptor.usage.mode=none;
 
-explain vectorization expression
+explain
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
 
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
 
 set hive.vectorized.adaptor.usage.mode=chosen;
 
-explain vectorization expression
+explain
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
 
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key;
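
The hunks above toggle hive.vectorized.adaptor.usage.mode around identical queries. Judging from the usesVectorUDFAdaptor field in the deleted plan output, the adaptor wraps row-mode UDFs so they can run inside an otherwise vectorized plan. A minimal sketch of the toggle, reusing a query from this file (the comments give my reading of the two modes, inferred from the setting values rather than documented semantics):

  -- none: the adaptor is never used, so queries over UDFs without a
  -- native vectorized implementation fall back to row-mode execution
  set hive.vectorized.adaptor.usage.mode=none;
  explain
  select regexp_extract(c2, 'val_([0-9]+)', 1) from varchar_udf_1 limit 1;

  -- chosen: the adaptor is applied to a vetted subset of UDFs, keeping
  -- the surrounding operators vectorized
  set hive.vectorized.adaptor.usage.mode=chosen;
  explain
  select regexp_extract(c2, 'val_([0-9]+)', 1) from varchar_udf_1 limit 1;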

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_aggregate_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_aggregate_9.q b/ql/src/test/queries/clientpositive/vector_aggregate_9.q
index 04fdeec..ce6f0ff 100644
--- a/ql/src/test/queries/clientpositive/vector_aggregate_9.q
+++ b/ql/src/test/queries/clientpositive/vector_aggregate_9.q
@@ -1,6 +1,5 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 create table vectortab2k(
             t tinyint,
@@ -39,7 +38,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc;
 
 -- SORT_QUERY_RESULTS
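
[Many of the reverted files, starting with this one, also drop "set hive.fetch.task.conversion=none;". For context, a hedged summary of that setting's values: with the line removed the tests fall back to the harness default, so simple queries may run as direct fetch tasks and never reach the vectorized execution path at all.]

  -- Sketch, not part of the patch:
  set hive.fetch.task.conversion=none;     -- always compile a real job (so vectorization can apply)
  set hive.fetch.task.conversion=minimal;  -- direct fetch for SELECT *, partition-column filters, LIMIT
  set hive.fetch.task.conversion=more;     -- also direct fetch for simple projections and filters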

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_aggregate_without_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_aggregate_without_gby.q b/ql/src/test/queries/clientpositive/vector_aggregate_without_gby.q
index 64440e3..8a63635 100644
--- a/ql/src/test/queries/clientpositive/vector_aggregate_without_gby.q
+++ b/ql/src/test/queries/clientpositive/vector_aggregate_without_gby.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=true;
-set hive.fetch.task.conversion=none;
-
 create table testvec(id int, dt int, greg_dt string) stored as orc;
 insert into table testvec
 values 
@@ -14,5 +12,5 @@ values
 (7,20150404, '2015-04-04');
 set hive.vectorized.execution.enabled=true;
 set hive.map.aggr=true;
-explain vectorization select max(dt), max(greg_dt) from testvec where id=5;
+explain select max(dt), max(greg_dt) from testvec where id=5;
 select max(dt), max(greg_dt) from testvec where id=5;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
index 3c53853..2077f8e 100644
--- a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
-set hive.explain.user=false;
+set hive.explain.user=true;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+;
 
 set hive.exec.reducers.max = 1;
 
@@ -23,7 +23,7 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.auto.convert.sortmerge.join=true;
 
 -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
 ) subq1;
@@ -33,7 +33,7 @@ select count(*) from (
 ) subq1;
 
 -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from
 (
   select key, count(*) from 
@@ -54,7 +54,7 @@ select count(*) from
 
 -- A join is being performed across different sub-queries, where a join is being performed in each of them.
 -- Each sub-query should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from 
@@ -89,7 +89,7 @@ on src1.key = src2.key;
 
 -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
@@ -104,7 +104,7 @@ select count(*) from
 
 -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
 -- be converted to a sort-merge join, although there is more than one level of sub-query
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -129,7 +129,7 @@ select count(*) from
 
 -- Both the tables are nested sub-queries, i.e. more than one level of sub-query.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (
   select * from
@@ -169,7 +169,7 @@ select count(*) from
 -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
 -- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
 -- item, but that is not part of the join key.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
     join
@@ -184,7 +184,7 @@ select count(*) from
 
 -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side
 -- join should be performed
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
     join
@@ -199,7 +199,7 @@ select count(*) from
 
 -- One of the tables is a sub-query and the other is not.
 -- It should be converted to a sort-merge join.
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join tbl2 a on subq1.key = a.key;
@@ -210,7 +210,7 @@ select count(*) from
 
 -- There are more than 2 inputs to the join, all of them being sub-queries. 
 -- It should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from 
   (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
     join
@@ -231,7 +231,7 @@ select count(*) from
 
 -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
 -- The join should be converted to a sort-merge join
-explain vectorization expression
+explain
 select count(*) from (
   select subq2.key as key, subq2.value as value1, b.value as value2 from
   (
@@ -261,7 +261,7 @@ CREATE TABLE dest2(key int, val1 string, val2 string);
 
 -- The join is followed by a multi-table insert. It should be converted to
 -- a sort-merge join
-explain vectorization expression
+explain
 from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
 ) subq1
@@ -282,7 +282,7 @@ CREATE TABLE dest2(key int, cnt int);
 
 -- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
 -- It should be converted to a sort-merge join
-explain vectorization expression
+explain
 from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
 ) subq1
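
[The sort-merge conversions this file checks depend on bucketed-and-sorted tables plus a small group of settings. Only hive.auto.convert.sortmerge.join and the BucketizedHiveInputFormat line are confirmed by the hunks above; the two bucket-map-join options are the usual companions and are an assumption here.]

  -- Typical SMB map-join configuration (sketch):
  set hive.auto.convert.sortmerge.join=true;
  set hive.optimize.bucketmapjoin=true;
  set hive.optimize.bucketmapjoin.sortedmerge=true;
  set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;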

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_between_columns.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_between_columns.q b/ql/src/test/queries/clientpositive/vector_between_columns.q
index 920b692..41f9243 100644
--- a/ql/src/test/queries/clientpositive/vector_between_columns.q
+++ b/ql/src/test/queries/clientpositive/vector_between_columns.q
@@ -24,13 +24,13 @@ create table TSINT stored as orc AS SELECT * FROM TSINT_txt;
 create table TINT stored as orc AS SELECT * FROM TINT_txt;
 
 
-explain vectorization expression
+explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint, (case when (tint.cint between tsint.csint and tsint.csint) then "Ok" else "NoOk" end) as between_col from tint , tsint;
 
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint, (case when (tint.cint between tsint.csint and tsint.csint) then "Ok" else "NoOk" end) as between_col from tint , tsint;
 
 
-explain vectorization expression
+explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint;
 
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_between_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_between_in.q b/ql/src/test/queries/clientpositive/vector_between_in.q
index b9f4b13..487bf96 100644
--- a/ql/src/test/queries/clientpositive/vector_between_in.q
+++ b/ql/src/test/queries/clientpositive/vector_between_in.q
@@ -1,25 +1,24 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE));
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE));
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1;
+EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568);
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568);
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate;
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate;
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1;
+EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;
 
 SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
 
@@ -41,13 +40,13 @@ SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 439
 
 -- projections
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0;
+EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0;
+EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0;
+EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0;
 
 SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
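
[Worth noting why this file tests the same predicates twice: a BETWEEN/IN in the WHERE clause is evaluated as a filter expression, while the same predicate in the select list (the "projections" section above) produces a boolean output column, which exercises a different vectorized expression form. An illustrative pair, taken from the queries above:]

  -- filter form
  SELECT cdate FROM decimal_date_test
  WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE);
  -- projection form: the predicate result becomes a boolean column
  SELECT cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) AS c0
  FROM decimal_date_test;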
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q
index d19dbc3..1d99e34 100644
--- a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q
+++ b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q
@@ -4,7 +4,6 @@ SET hive.auto.convert.join=true;
 SET hive.auto.convert.join.noconditionaltask=true;
 SET hive.auto.convert.join.noconditionaltask.size=1000000000;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 DROP TABLE over1k;
 DROP TABLE hundredorc;
@@ -41,14 +40,14 @@ STORED AS ORC;
 
 INSERT INTO TABLE hundredorc SELECT * FROM over1k LIMIT 100;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN 
 SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.bin = t2.bin;
 
 SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.bin = t2.bin;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN 
 SELECT count(*), bin
 FROM hundredorc
 GROUP BY bin;
@@ -59,6 +58,6 @@ GROUP BY bin;
 
 -- HIVE-14045: Involve a binary vector scratch column for small table result (Native Vector MapJoin).
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT t1.i, t1.bin, t2.bin
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.i = t2.i;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_bround.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bround.q b/ql/src/test/queries/clientpositive/vector_bround.q
index ffa3ad3..deea00b 100644
--- a/ql/src/test/queries/clientpositive/vector_bround.q
+++ b/ql/src/test/queries/clientpositive/vector_bround.q
@@ -1,7 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=true;
-SET hive.fetch.task.conversion=none;
-
 create table test_vector_bround(v0 double, v1 double) stored as orc;
 insert into table test_vector_bround
 values
@@ -14,5 +12,5 @@ values
 (2.51, 1.251),
 (3.51, 1.351);
 set hive.vectorized.execution.enabled=true;
-explain vectorization select bround(v0), bround(v1, 1) from test_vector_bround;
+explain select bround(v0), bround(v1, 1) from test_vector_bround;
 select bround(v0), bround(v1, 1) from test_vector_bround;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bucket.q b/ql/src/test/queries/clientpositive/vector_bucket.q
index b67592e..39436c9 100644
--- a/ql/src/test/queries/clientpositive/vector_bucket.q
+++ b/ql/src/test/queries/clientpositive/vector_bucket.q
@@ -2,13 +2,12 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.support.concurrency=true;
-set hive.fetch.task.conversion=none;
 
 
 CREATE TABLE non_orc_table(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS sequencefile; 
 
 
-explain vectorization expression
+explain
 insert into table non_orc_table values(1, 'one'),(1, 'one'), (2, 'two'),(3, 'three'); select a, b from non_orc_table order by a;
 
 insert into table non_orc_table values(1, 'one'),(1, 'one'), (2, 'two'),(3, 'three'); select a, b from non_orc_table order by a;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_cast_constant.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_cast_constant.q b/ql/src/test/queries/clientpositive/vector_cast_constant.q
index aac7f92..94bee09 100644
--- a/ql/src/test/queries/clientpositive/vector_cast_constant.q
+++ b/ql/src/test/queries/clientpositive/vector_cast_constant.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+
 
 DROP TABLE over1k;
 DROP TABLE over1korc;
@@ -38,7 +38,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE over1korc SELECT * FROM over1k;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT 
+EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_char_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_2.q b/ql/src/test/queries/clientpositive/vector_char_2.q
index 5520ddd..f1bb75b 100644
--- a/ql/src/test/queries/clientpositive/vector_char_2.q
+++ b/ql/src/test/queries/clientpositive/vector_char_2.q
@@ -1,8 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 drop table char_2;
 
 create table char_2 (
@@ -18,7 +16,7 @@ group by value
 order by value asc
 limit 5;
 
-explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value asc
@@ -37,7 +35,7 @@ group by value
 order by value desc
 limit 5;
 
-explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value desc

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_char_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_4.q b/ql/src/test/queries/clientpositive/vector_char_4.q
index edc59cd..06f1d2b 100644
--- a/ql/src/test/queries/clientpositive/vector_char_4.q
+++ b/ql/src/test/queries/clientpositive/vector_char_4.q
@@ -1,6 +1,5 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 drop table if exists vectortab2k;
 drop table if exists vectortab2korc;
@@ -45,7 +44,7 @@ INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 drop table if exists char_lazy_binary_columnar; 
 create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile;
 
-explain vectorization expression
+explain
 insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
 
 -- insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_char_cast.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_cast.q b/ql/src/test/queries/clientpositive/vector_char_cast.q
index c7d3c3c..bc78d51 100644
--- a/ql/src/test/queries/clientpositive/vector_char_cast.q
+++ b/ql/src/test/queries/clientpositive/vector_char_cast.q
@@ -1,5 +1,3 @@
-set hive.fetch.task.conversion=none;
-
 create table s1(id smallint) stored as orc;
 
 insert into table s1 values (1000),(1001),(1002),(1003),(1000);

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q b/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q
index f5c05a4..58a73be 100644
--- a/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q
+++ b/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q
@@ -37,21 +37,21 @@ create table char_join1_vc2_orc stored as orc as select * from char_join1_vc2;
 create table char_join1_str_orc stored as orc as select * from char_join1_str;
 
 -- Join char with same length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- SORT_QUERY_RESULTS
 
 select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- Join char with different length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- SORT_QUERY_RESULTS
 
 select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- Join char with string
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1;
+explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1;
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_char_simple.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_simple.q b/ql/src/test/queries/clientpositive/vector_char_simple.q
index c315241..a921140 100644
--- a/ql/src/test/queries/clientpositive/vector_char_simple.q
+++ b/ql/src/test/queries/clientpositive/vector_char_simple.q
@@ -1,7 +1,5 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
-
 drop table char_2;
 
 create table char_2 (
@@ -16,7 +14,7 @@ from src
 order by key asc
 limit 5;
 
-explain vectorization only select key, value
+explain select key, value
 from char_2
 order by key asc
 limit 5;
@@ -32,7 +30,7 @@ from src
 order by key desc
 limit 5;
 
-explain vectorization only select key, value
+explain select key, value
 from char_2
 order by key desc
 limit 5;
@@ -51,7 +49,7 @@ create table char_3 (
   field char(12)
 ) stored as orc;
 
-explain vectorization only operator
+explain
 insert into table char_3 select cint from alltypesorc limit 10;
 
 insert into table char_3 select cint from alltypesorc limit 10;
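
[This file originally used the "vectorization only" and "vectorization only operator" variants. For reference, the clause grammar introduced by HIVE-11394, the feature this commit reverts, as I understand it from that issue:]

  -- EXPLAIN VECTORIZATION [ONLY] [SUMMARY | OPERATOR | EXPRESSION | DETAIL]
  -- ONLY suppresses the ordinary plan output; the detail level defaults to SUMMARY.
  EXPLAIN VECTORIZATION ONLY OPERATOR
  select key, value from char_2 order by key asc limit 5;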

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_coalesce.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_coalesce.q b/ql/src/test/queries/clientpositive/vector_coalesce.q
index 11296e1..cfba7be 100644
--- a/ql/src/test/queries/clientpositive/vector_coalesce.q
+++ b/ql/src/test/queries/clientpositive/vector_coalesce.q
@@ -1,11 +1,10 @@
 set hive.stats.fetch.column.stats=true;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
+EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) as c
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 ORDER BY cdouble, cstring1, cint, cfloat, csmallint, c
@@ -17,7 +16,7 @@ WHERE (cdouble IS NULL)
 ORDER BY cdouble, cstring1, cint, cfloat, csmallint, c
 LIMIT 10;
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
+EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0) as c
 FROM alltypesorc
 WHERE (ctinyint IS NULL)
 ORDER BY ctinyint, cdouble, cint, c
@@ -29,7 +28,7 @@ WHERE (ctinyint IS NULL)
 ORDER BY ctinyint, cdouble, cint, c
 LIMIT 10;
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
+EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
@@ -41,7 +40,7 @@ WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10;
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
+EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2) as c
 FROM alltypesorc 
 WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL
 ORDER BY ctimestamp1, ctimestamp2, c
@@ -53,7 +52,7 @@ WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL
 ORDER BY ctimestamp1, ctimestamp2, c
 LIMIT 10;
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
+EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint) as c
 FROM alltypesorc
 WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
@@ -65,7 +64,7 @@ WHERE (cfloat IS NULL AND cbigint IS NULL)
 ORDER BY cfloat, cbigint, c
 LIMIT 10;
 
-EXPLAIN VECTORIZATION ONLY EXPRESSION SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
+EXPLAIN SELECT cbigint, ctinyint, coalesce(cbigint, ctinyint) as c
 FROM alltypesorc
 WHERE cbigint IS NULL
 LIMIT 10;
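
[Each query above pins some COALESCE arguments to NULL via the WHERE clause, so the vectorized evaluation has to fall through the argument list. As a reminder of the semantics (standard SQL, not specific to this patch):]

  -- COALESCE returns its first non-NULL argument, or NULL if all are NULL:
  SELECT COALESCE(NULL, CAST(5 AS TINYINT));                  -- 5
  SELECT COALESCE(CAST(NULL AS INT), CAST(NULL AS INT));      -- NULL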

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_coalesce_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_coalesce_2.q b/ql/src/test/queries/clientpositive/vector_coalesce_2.q
index ea45ddd..c847e20 100644
--- a/ql/src/test/queries/clientpositive/vector_coalesce_2.q
+++ b/ql/src/test/queries/clientpositive/vector_coalesce_2.q
@@ -7,7 +7,7 @@ create table str_str_orc (str1 string, str2 string) stored as orc;
 
 insert into table str_str_orc values (null, "X"), ("0", "X"), ("1", "X"), (null, "y");
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
@@ -18,7 +18,7 @@ SELECT
 from str_str_orc
 GROUP BY str2;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc;
 
@@ -27,7 +27,7 @@ from str_str_orc;
 
 SET hive.vectorized.execution.enabled=true;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result
 from str_str_orc
@@ -38,7 +38,7 @@ SELECT
 from str_str_orc
 GROUP BY str2;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT COALESCE(str1, 0) as result
 from str_str_orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_complex_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_complex_all.q b/ql/src/test/queries/clientpositive/vector_complex_all.q
index bd1f4a9..91a7368 100644
--- a/ql/src/test/queries/clientpositive/vector_complex_all.q
+++ b/ql/src/test/queries/clientpositive/vector_complex_all.q
@@ -27,19 +27,19 @@ CREATE TABLE orc_create_complex (
 INSERT OVERWRITE TABLE orc_create_complex SELECT * FROM orc_create_staging;
 
 -- Since complex types are not supported, this query should not vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT * FROM orc_create_complex;
 
 SELECT * FROM orc_create_complex;
 
 -- However, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT COUNT(*) FROM orc_create_complex;
 
 SELECT COUNT(*) FROM orc_create_complex;
 
 -- Also, since this query is not referencing the complex fields, it should vectorize.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT str FROM orc_create_complex ORDER BY str;
 
 SELECT str FROM orc_create_complex ORDER BY str;
\ No newline at end of file
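
[The three comments in this hunk state the rule the test pins down; a compact restatement grounded in them:]

  -- Not vectorized: references complex-typed columns directly
  SELECT * FROM orc_create_complex;
  -- Vectorized: no complex column is actually read
  SELECT COUNT(*) FROM orc_create_complex;
  SELECT str FROM orc_create_complex ORDER BY str;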

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_complex_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_complex_join.q b/ql/src/test/queries/clientpositive/vector_complex_join.q
index c6926cb..30f38b1 100644
--- a/ql/src/test/queries/clientpositive/vector_complex_join.q
+++ b/ql/src/test/queries/clientpositive/vector_complex_join.q
@@ -10,7 +10,7 @@ set hive.fetch.task.conversion=none;
 CREATE TABLE test (a INT, b MAP<INT, STRING>) STORED AS ORC;
 INSERT OVERWRITE TABLE test SELECT 199408978, MAP(1, "val_1", 2, "val_2") FROM src LIMIT 1;
 
-explain vectorization expression
+explain
 select * from alltypesorc join test where alltypesorc.cint=test.a;
 
 select * from alltypesorc join test where alltypesorc.cint=test.a;
@@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src LIMIT 1;
 CREATE TABLE test2b (a INT) STORED AS ORC;
 INSERT OVERWRITE TABLE test2b VALUES (2), (3), (4);
 
-explain vectorization expression
+explain
 select *  from test2b join test2a on test2b.a = test2a.a[1];
 
 select *  from test2b join test2a on test2b.a = test2a.a[1];
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_count.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_count.q b/ql/src/test/queries/clientpositive/vector_count.q
index 74b24cc..341db74 100644
--- a/ql/src/test/queries/clientpositive/vector_count.q
+++ b/ql/src/test/queries/clientpositive/vector_count.q
@@ -12,15 +12,15 @@ create table abcd stored as orc as select * from abcd_txt;
 
 select * from abcd;
 set hive.map.aggr=true;
-explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
+explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
 select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
 
-explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
+explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
 select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
 
 set hive.map.aggr=false;
-explain vectorization expression select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
+explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
 select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
 
-explain vectorization expression select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
+explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
 select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
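
[A note on the long COUNT(DISTINCT ...) list above: multi-column DISTINCT counts distinct tuples, and like every COUNT(expr) form it skips rows in which any listed expression is NULL; only COUNT(*) and COUNT(1) count every row. A hedged illustration, assuming a two-column slice of abcd with rows (1, NULL), (1, 2), (1, 2):]

  SELECT count(*)             FROM abcd;  -- 3
  SELECT count(b)             FROM abcd;  -- 2 (the NULL is skipped)
  SELECT count(distinct a, b) FROM abcd;  -- 1 (only the (1, 2) pair qualifies)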

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_count_distinct.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_count_distinct.q b/ql/src/test/queries/clientpositive/vector_count_distinct.q
index 72ca3fa..ec72079 100644
--- a/ql/src/test/queries/clientpositive/vector_count_distinct.q
+++ b/ql/src/test/queries/clientpositive/vector_count_distinct.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 create table web_sales_txt
 (
@@ -105,7 +104,7 @@ select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
 
 ------------------------------------------------------------------------------------------
 
-explain vectorization expression
+explain
 select count(distinct ws_order_number) from web_sales;
 
 select count(distinct ws_order_number) from web_sales;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_data_types.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_data_types.q b/ql/src/test/queries/clientpositive/vector_data_types.q
index d3ee19b..c7e0d1b 100644
--- a/ql/src/test/queries/clientpositive/vector_data_types.q
+++ b/ql/src/test/queries/clientpositive/vector_data_types.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
 
 DROP TABLE over1k;
 DROP TABLE over1korc;
@@ -39,7 +38,7 @@ INSERT INTO TABLE over1korc SELECT * FROM over1k;
 
 SET hive.vectorized.execution.enabled=false;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
+EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
 
 SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
 
@@ -48,7 +47,7 @@ FROM (SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, s
 
 SET hive.vectorized.execution.enabled=true;
 
-EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
+EXPLAIN select t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
 
 SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_date_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_date_1.q b/ql/src/test/queries/clientpositive/vector_date_1.q
index 7e8768c..072ed5c 100644
--- a/ql/src/test/queries/clientpositive/vector_date_1.q
+++ b/ql/src/test/queries/clientpositive/vector_date_1.q
@@ -1,7 +1,8 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
+
 set hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 drop table if exists vector_date_1;
 create table vector_date_1 (dt1 date, dt2 date) stored as orc;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_1.q b/ql/src/test/queries/clientpositive/vector_decimal_1.q
index e797892..8a1503f 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_1.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_1.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 drop table if exists decimal_1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
index 14650f9..596b2bd 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS decimal_txt;
 DROP TABLE IF EXISTS `decimal`;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_2.q b/ql/src/test/queries/clientpositive/vector_decimal_2.q
index e00fefe..f1477ce 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_2.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_2.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 drop table decimal_2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_3.q b/ql/src/test/queries/clientpositive/vector_decimal_3.q
index c23a652..9fa5d6f 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_3.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_3.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_3_txt;
 DROP TABLE IF EXISTS DECIMAL_3;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_4.q b/ql/src/test/queries/clientpositive/vector_decimal_4.q
index 0c34074..29c9875 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_4.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_4.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_4_1;
 DROP TABLE IF EXISTS DECIMAL_4_2;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_5.q b/ql/src/test/queries/clientpositive/vector_decimal_5.q
index f5de13b..7cf604d 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_5.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_5.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_5_txt;
 DROP TABLE IF EXISTS DECIMAL_5;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_6.q b/ql/src/test/queries/clientpositive/vector_decimal_6.q
index fe145e6..1d0c3ae 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_6.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_6.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_6_1_txt;
 DROP TABLE IF EXISTS DECIMAL_6_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
index 843b57e..552a564 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
@@ -1,6 +1,4 @@
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
-
 CREATE TABLE decimal_vgby STORED AS ORC AS 
     SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, 
     CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
@@ -12,7 +10,7 @@ SET hive.vectorized.execution.enabled=true;
 -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
     FROM decimal_vgby
@@ -26,7 +24,7 @@ SELECT cint,
     HAVING COUNT(*) > 1;
 
 -- Now add the others...
-EXPLAIN VECTORIZATION EXPRESSION SELECT cint,
+EXPLAIN SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
     COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
     FROM decimal_vgby

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_cast.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_cast.q b/ql/src/test/queries/clientpositive/vector_decimal_cast.q
index fc8861e..eb0e75c 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_cast.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_cast.q
@@ -1,7 +1,6 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10;
+EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10;
 
 SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
index 864e552..33d0747 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
@@ -1,12 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
 SET hive.vectorized.execution.enabled=true;
-EXPLAIN VECTORIZATION EXPRESSION SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+EXPLAIN SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
index dac0317..3007239 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
@@ -4,7 +4,6 @@ SET hive.auto.convert.join=true;
 SET hive.auto.convert.join.noconditionaltask=true;
 SET hive.auto.convert.join.noconditionaltask.size=1000000000;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 CREATE TABLE over1k(t tinyint,
            si smallint,
@@ -27,7 +26,7 @@ INSERT INTO TABLE t1 select dec from over1k;
 CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC;
 INSERT INTO TABLE t2 select dec from over1k;
 
-explain vectorization expression
+explain
 select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
index 08e1e0f..4ebde6a 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
@@ -1,13 +1,11 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-set hive.fetch.task.conversion=none;
 CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression 
+explain 
 select
    cdecimal1
   ,Round(cdecimal1, 2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_precision.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_precision.q b/ql/src/test/queries/clientpositive/vector_decimal_precision.q
index 97616f6..cc3fb63 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_precision.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_precision.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_PRECISION_txt;
 DROP TABLE IF EXISTS DECIMAL_PRECISION;
@@ -26,7 +26,7 @@ SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec;
 SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec;
 SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
+EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
 SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
 
 SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_round.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_round.q b/ql/src/test/queries/clientpositive/vector_decimal_round.q
index ba20fef..bf83163 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_round.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_round.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 create table decimal_tbl_txt (dec decimal(10,0)) 
 ROW FORMAT DELIMITED
@@ -12,12 +12,12 @@ insert into table decimal_tbl_txt values(101);
 
 select * from decimal_tbl_txt;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_txt order by dec;
 
 select dec, round(dec, -1) from decimal_tbl_txt order by dec;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1);
 
 select dec, round(dec, -1) from decimal_tbl_txt order by round(dec, -1);
@@ -29,12 +29,12 @@ insert into table decimal_tbl_rc values(101);
 
 select * from decimal_tbl_rc;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_rc order by dec;
 
 select dec, round(dec, -1) from decimal_tbl_rc order by dec;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1);
 
 select dec, round(dec, -1) from decimal_tbl_rc order by round(dec, -1);
@@ -46,12 +46,12 @@ insert into table decimal_tbl_orc values(101);
 
 select * from decimal_tbl_orc;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_orc order by dec;
 
 select dec, round(dec, -1) from decimal_tbl_orc order by dec;
 
-explain vectorization expression
+explain
 select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1);
 
 select dec, round(dec, -1) from decimal_tbl_orc order by round(dec, -1);
\ No newline at end of file
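
[The negative scale passed to round() above rounds left of the decimal point; small worked examples, verifiable by hand:]

  SELECT round(101, -1);   -- 100 (nearest ten)
  SELECT round(105, -1);   -- 110 (half rounds away from zero)
  SELECT round(101, -2);   -- 100 (nearest hundred)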

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_round_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_round_2.q b/ql/src/test/queries/clientpositive/vector_decimal_round_2.q
index 7afc780..0020325 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_round_2.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_round_2.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 create table decimal_tbl_1_orc (dec decimal(38,18)) 
 STORED AS ORC;
@@ -19,7 +19,7 @@ select * from decimal_tbl_1_orc;
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
 -- FROM decimal_tbl_1_orc ORDER BY dec;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3),
   round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4),
@@ -39,7 +39,7 @@ insert into table decimal_tbl_2_orc values(125.315, -125.315);
 
 select * from decimal_tbl_2_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
   round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4),
@@ -65,7 +65,7 @@ insert into table decimal_tbl_3_orc values(3.141592653589793);
 
 select * from decimal_tbl_3_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
   round(dec, -13), round(dec, -14),
@@ -113,7 +113,7 @@ insert into table decimal_tbl_4_orc values(1809242.3151111344, -1809242.31511113
 
 select * from decimal_tbl_4_orc;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_trailing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_trailing.q b/ql/src/test/queries/clientpositive/vector_decimal_trailing.q
index 40935aa..5f13396 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_trailing.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_trailing.q
@@ -1,6 +1,6 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_TRAILING_txt;
 DROP TABLE IF EXISTS DECIMAL_TRAILING;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_decimal_udf2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q
index a013f1f..433f464 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 DROP TABLE IF EXISTS DECIMAL_UDF2_txt;
 DROP TABLE IF EXISTS DECIMAL_UDF2;
@@ -18,14 +18,14 @@ STORED AS ORC;
 
 INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key)
 FROM DECIMAL_UDF2 WHERE key = 10;
 
 SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key)
 FROM DECIMAL_UDF2 WHERE key = 10;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   exp(key), ln(key),
   log(key), log(key, key), log(key, value), log(value, key),

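On the two-argument calls above: in Hive, log(b, x) is the base-b logarithm of x, so log(key, key) is 1.0 for any valid key, and radians(x) converts degrees to radians. A small sketch against the table defined above:

-- two-argument log is the base-b logarithm of x:
select log(key, key) from DECIMAL_UDF2 where key = 10;   -- 1.0
select log(10, 100) from DECIMAL_UDF2 limit 1;           -- 2.0
select radians(180) from DECIMAL_UDF2 limit 1;           -- pi (~3.14159)
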
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_distinct_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_distinct_2.q b/ql/src/test/queries/clientpositive/vector_distinct_2.q
index 4be23c1..509b262 100644
--- a/ql/src/test/queries/clientpositive/vector_distinct_2.q
+++ b/ql/src/test/queries/clientpositive/vector_distinct_2.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -42,7 +41,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select distinct s, t from vectortab2korc;
 
 select distinct s, t from vectortab2korc;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_elt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_elt.q b/ql/src/test/queries/clientpositive/vector_elt.q
index 5e54cbb..f44a3be 100644
--- a/ql/src/test/queries/clientpositive/vector_elt.q
+++ b/ql/src/test/queries/clientpositive/vector_elt.q
@@ -1,9 +1,8 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
-EXPLAIN VECTORIZATION EXPRESSION SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
+EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10;
 
@@ -11,7 +10,7 @@ SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cin
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
        elt('1', 'abc', 'defg'),

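For reference, elt(n, v1, v2, ...) as exercised above returns its n-th value argument, with NULL for an out-of-range index; a sketch of the expected results, per standard Hive UDF semantics:

select elt(2, 'abc', 'defg');                             -- 'defg'
select elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg');  -- 'cc'
select elt('1', 'abc', 'defg');                           -- 'abc' (string index is cast to int)
select elt(0, 'abc', 'defg'), elt(4, 'abc', 'defg');      -- NULL, NULL
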
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_empty_where.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_empty_where.q b/ql/src/test/queries/clientpositive/vector_empty_where.q
index 3e94c92..0543a65 100644
--- a/ql/src/test/queries/clientpositive/vector_empty_where.q
+++ b/ql/src/test/queries/clientpositive/vector_empty_where.q
@@ -2,22 +2,22 @@ SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
 -- HIVE-
-explain vectorization expression
+explain
 select count (distinct cint) from alltypesorc where cstring1;
 
 select count (distinct cint) from alltypesorc where cstring1;
 
-explain vectorization expression
+explain
 select count (distinct cint) from alltypesorc where cint;
 
 select count (distinct cint) from alltypesorc where cint;
 
-explain vectorization expression
+explain
 select count (distinct cint) from alltypesorc where cfloat;
 
 select count (distinct cint) from alltypesorc where cfloat;
 
-explain vectorization expression
+explain
 select count (distinct cint) from alltypesorc where ctimestamp1;
 
 select count (distinct cint) from alltypesorc where ctimestamp1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_groupby4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_groupby4.q b/ql/src/test/queries/clientpositive/vector_groupby4.q
index 1906c07..a59d1a8 100644
--- a/ql/src/test/queries/clientpositive/vector_groupby4.q
+++ b/ql/src/test/queries/clientpositive/vector_groupby4.q
@@ -12,7 +12,7 @@ CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src;
 
 CREATE TABLE dest1(c1 STRING) STORED AS ORC;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1);
 

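The FROM-first statement above is Hive's multi-insert form: one scan of the source table can feed several INSERT clauses, each with its own GROUP BY. A sketch of the general shape; dest2 and its select list are hypothetical:

FROM srcorc
INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1)
INSERT OVERWRITE TABLE dest2 SELECT substr(srcorc.value,5,1) GROUP BY substr(srcorc.value,5,1);
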
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_groupby6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_groupby6.q b/ql/src/test/queries/clientpositive/vector_groupby6.q
index cb01882..89c7a19 100644
--- a/ql/src/test/queries/clientpositive/vector_groupby6.q
+++ b/ql/src/test/queries/clientpositive/vector_groupby6.q
@@ -12,7 +12,7 @@ CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src;
 
 CREATE TABLE dest1(c1 STRING) STORED AS ORC;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_groupby_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_groupby_3.q b/ql/src/test/queries/clientpositive/vector_groupby_3.q
index 299ee92..d42d7f1 100644
--- a/ql/src/test/queries/clientpositive/vector_groupby_3.q
+++ b/ql/src/test/queries/clientpositive/vector_groupby_3.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -42,7 +41,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select s, t, max(b) from vectortab2korc group by s, t;
 
 select s, t, max(b) from vectortab2korc group by s, t;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_groupby_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_groupby_mapjoin.q b/ql/src/test/queries/clientpositive/vector_groupby_mapjoin.q
index c692182..53df2aa 100644
--- a/ql/src/test/queries/clientpositive/vector_groupby_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vector_groupby_mapjoin.q
@@ -1,5 +1,5 @@
 set hive.mapred.mode=nonstrict;
-set hive.explain.user=false;
+set hive.explain.user=true;
 SET hive.vectorized.execution.enabled = true;
 set hive.fetch.task.conversion=none;
 SET hive.auto.convert.join=true;
@@ -8,7 +8,7 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 -- HIVE-12738 -- We are checking if a MapJoin after a GroupBy will work properly.
-explain vectorization expression
+explain
 select *
 from src
 where not key in

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_groupby_reduce.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q
index bafb32e..e78b57f 100644
--- a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q
+++ b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 create table store_sales_txt
 (
@@ -92,7 +91,7 @@ ss_sold_date_sk           ,
     ss_net_profit         
  from store_sales_txt;
 
-explain vectorization expression
+explain
 select 
   ss_ticket_number
 from
@@ -109,7 +108,7 @@ limit 20;
 
 
 
-explain vectorization expression
+explain
 select 
     min(ss_ticket_number) m
 from
@@ -134,7 +133,7 @@ order by m;
 
 
 
-explain vectorization expression
+explain
 select
     ss_ticket_number, sum(ss_item_sk), sum(q)
 from
@@ -158,7 +157,7 @@ group by ss_ticket_number
 order by ss_ticket_number;
 
 
-explain vectorization expression
+explain
 select
     ss_ticket_number, ss_item_sk, sum(q)
 from

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_grouping_sets.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_grouping_sets.q b/ql/src/test/queries/clientpositive/vector_grouping_sets.q
index 1b5d3a0..09ba6b6 100644
--- a/ql/src/test/queries/clientpositive/vector_grouping_sets.q
+++ b/ql/src/test/queries/clientpositive/vector_grouping_sets.q
@@ -45,7 +45,7 @@ create table store
 stored as orc as
 select * from store_txt;
 
-explain vectorization expression
+explain
 select s_store_id
  from store
  group by s_store_id with rollup;
@@ -54,7 +54,7 @@ select s_store_id
  from store
  group by s_store_id with rollup;
 
-explain vectorization expression
+explain
 select s_store_id, GROUPING__ID
  from store
  group by s_store_id with rollup;

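On the rollup queries above: WITH ROLLUP over a single key is equivalent to grouping sets of the key plus the empty set, and the GROUPING__ID virtual column identifies which set produced each row. A sketch, assuming standard Hive rollup semantics:

-- equivalent formulations of the first query:
select s_store_id from store group by s_store_id with rollup;
select s_store_id from store group by s_store_id grouping sets ((s_store_id), ());
-- the grand-total row (s_store_id aggregated away to NULL) carries a distinct GROUPING__ID
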
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_if_expr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_if_expr.q b/ql/src/test/queries/clientpositive/vector_if_expr.q
index 6e7ccf7..475cecf 100644
--- a/ql/src/test/queries/clientpositive/vector_if_expr.q
+++ b/ql/src/test/queries/clientpositive/vector_if_expr.q
@@ -1,9 +1,9 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT cboolean1, IF (cboolean1, 'first', 'second') FROM alltypesorc WHERE cboolean1 IS NOT NULL AND cboolean1 ORDER BY cboolean1;
 
 SELECT cboolean1, IF (cboolean1, 'first', 'second') FROM alltypesorc WHERE cboolean1 IS NOT NULL AND cboolean1 ORDER BY cboolean1 LIMIT 5;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_include_no_sel.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_include_no_sel.q b/ql/src/test/queries/clientpositive/vector_include_no_sel.q
index a499ae5..03f676b 100644
--- a/ql/src/test/queries/clientpositive/vector_include_no_sel.q
+++ b/ql/src/test/queries/clientpositive/vector_include_no_sel.q
@@ -6,7 +6,6 @@ SET hive.auto.convert.join=true;
 SET hive.auto.convert.join.noconditionaltask=true;
 SET hive.auto.convert.join.noconditionaltask.size=1000000000;
 SET hive.mapred.mode=nonstrict;
-set hive.fetch.task.conversion=none;
 
 -- HIVE-13872
 -- Looking for TableScan immediately followed by ReduceSink (no intervening SEL operator).
@@ -70,7 +69,7 @@ LOAD DATA LOCAL INPATH '../../data/files/customer_demographics.txt' OVERWRITE IN
 
 create table customer_demographics stored as orc as select * from customer_demographics_txt;
 
-explain vectorization expression
+explain
 select count(1) from customer_demographics,store_sales
 where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or
        (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U'));

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_inner_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_inner_join.q b/ql/src/test/queries/clientpositive/vector_inner_join.q
index 54194a8..24b66bf 100644
--- a/ql/src/test/queries/clientpositive/vector_inner_join.q
+++ b/ql/src/test/queries/clientpositive/vector_inner_join.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 CREATE TABLE orc_table_1a(a INT) STORED AS ORC;
 CREATE TABLE orc_table_2a(c INT) STORED AS ORC; 
@@ -10,12 +9,12 @@ CREATE TABLE orc_table_2a(c INT) STORED AS ORC;
 insert into table orc_table_1a values(1),(1), (2),(3);
 insert into table orc_table_2a values(0),(2), (3),(null),(4);
 
-explain vectorization detail
+explain
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2;
 
 select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2;
@@ -27,38 +26,38 @@ CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC;
 insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3);
 insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, "<NULL>"),(4, "FOUR");
 
-explain vectorization detail
+explain
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 
-explain vectorization detail
+explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
 select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2;
 
 select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2;
 
-explain vectorization detail
+explain
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2;
 
 select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2;

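For the semi-join case above: LEFT SEMI JOIN returns the rows of the left table that have at least one match on the right, and only left-side columns may be selected. A sketch of the equivalent IN form:

-- these two queries are equivalent:
select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2;
select t2.c from orc_table_2a t2 where t2.c > 2 and t2.c in (select a from orc_table_1a);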

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
index d1319b8..b311c49 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
@@ -62,16 +62,12 @@ POSTHOOK: Input: default@orc_table_2
 4	FOUR
 NULL	<NULL1>
 NULL	<NULL2>
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -87,38 +83,15 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: c (type: int), v2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -131,16 +104,9 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: v1 (type: string), a (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -148,45 +114,18 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [1]
-                          bigTableOuterKeyMapping: 1 -> 2
-                          bigTableRetainedColumns: [0, 1, 2]
-                          bigTableValueColumns: [0, 1]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0, 1, 2, 3]
-                          smallTableMapping: [3]
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: bigint, String
             Local Work:
               Map Reduce Local Work
 
@@ -216,16 +155,12 @@ one	1	NULL	NULL
 one	1	NULL	NULL
 three	3	3	THREE
 two	2	2	TWO
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -241,38 +176,15 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: v1 (type: string), a (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -285,16 +197,9 @@ STAGE PLANS:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: c (type: int), v2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -302,45 +207,18 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [0]
-                          bigTableOuterKeyMapping: 0 -> 3
-                          bigTableRetainedColumns: [0, 1, 3]
-                          bigTableValueColumns: [0, 1]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [2, 3, 0, 1]
-                          smallTableMapping: [2]
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         0 Map 1
                       Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String, bigint
             Local Work:
               Map Reduce Local Work
 

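The PLAN VECTORIZATION and per-operator "... Vectorization:" blocks removed above are output of the EXPLAIN extension this commit reverts. For reference, the grammar as we understand it from HIVE-11394:

EXPLAIN VECTORIZATION [ONLY] [SUMMARY | OPERATOR | EXPRESSION | DETAIL]

-- e.g. the DETAIL level whose output is stripped in this file:
explain vectorization detail
select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c;
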
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 3a7e27f..6b89fb3 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -216,22 +216,18 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select * 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
   on cd.cint = c.cint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select * 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
   on cd.cint = c.cint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -247,38 +243,15 @@ STAGE PLANS:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col2 (type: int)
                         1 _col2 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -291,16 +264,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -308,45 +274,18 @@ STAGE PLANS:
                       keys:
                         0 _col2 (type: int)
                         1 _col2 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableOuterKeyMapping: 2 -> 14
-                          bigTableRetainedColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14]
-                          bigTableValueColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
-                          smallTableMapping: [12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23]
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 16 Data size: 4403 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 16 Data size: 4403 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: bigint, bigint, bigint, bigint, Double, Double, String, String, Timestamp, Timestamp, bigint, bigint
             Local Work:
               Map Reduce Local Work
 
@@ -393,22 +332,18 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false	NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false	NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false	NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -424,38 +359,15 @@ STAGE PLANS:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col0 (type: tinyint)
                         1 _col0 (type: tinyint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -468,16 +380,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -485,42 +390,18 @@ STAGE PLANS:
                       keys:
                         0 _col0 (type: tinyint)
                         1 _col0 (type: tinyint)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [0]
-                          bigTableRetainedColumns: [0]
-                          bigTableValueColumns: [0]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0]
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 16 Data size: 4403 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 16 Data size: 4403 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -653,7 +534,7 @@ NULL
 NULL
 NULL
 NULL
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -662,7 +543,7 @@ left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -671,10 +552,6 @@ left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -690,38 +567,15 @@ STAGE PLANS:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -729,38 +583,15 @@ STAGE PLANS:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col0 (type: tinyint)
                         1 _col0 (type: tinyint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -775,16 +606,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), cint (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 2]
                     Statistics: Num rows: 15 Data size: 4003 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -792,14 +616,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableRetainedColumns: [0]
-                          bigTableValueColumns: [0]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0]
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 3
@@ -810,84 +626,32 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: tinyint)
                           1 _col0 (type: tinyint)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinOuterLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col0
                         input vertices:
                           1 Map 4
                         Statistics: Num rows: 17 Data size: 4843 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count(), sum(_col0)
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint, VectorUDAFSumLong(col 0) -> bigint
-                              className: VectorGroupByOperator
-                              vectorOutput: true
-                              native: false
-                              projectedOutputColumns: [0, 1]
                           mode: hash
                           outputColumnNames: _col0, _col1
                           Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                           Reduce Output Operator
                             sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkOperator
-                                native: false
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                                nativeConditionsNotMet: Uniform Hash IS false
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint
-                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), sum(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint, VectorUDAFSumLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

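Reading the plans above: the small-table side is materialized through a Spark HashTable Sink and broadcast, while the big-table side streams through the Map Join Operator. The settings in the corresponding .q files that force this plan shape:

SET hive.auto.convert.join=true;                              -- allow map-join conversion
SET hive.auto.convert.join.noconditionaltask=true;
SET hive.auto.convert.join.noconditionaltask.size=1000000000; -- small-table size threshold, bytes
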
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index 453db4b..113c7d0 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -226,7 +226,7 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -235,7 +235,7 @@ left outer join small_alltypesorc_a hd
   on hd.cbigint = c.cbigint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -244,10 +244,6 @@ left outer join small_alltypesorc_a hd
   on hd.cbigint = c.cbigint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2
@@ -263,38 +259,15 @@ STAGE PLANS:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col0 (type: int)
                         1 _col0 (type: int)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -302,38 +275,15 @@ STAGE PLANS:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
-                      Spark Hash Table Sink Vectorization:
-                          className: VectorSparkHashTableSinkOperator
-                          native: true
                       keys:
                         0 _col1 (type: bigint)
                         1 _col0 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [3]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
 
@@ -348,16 +298,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int), cbigint (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3]
                     Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -365,14 +308,6 @@ STAGE PLANS:
                       keys:
                         0 _col0 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableRetainedColumns: [3]
-                          bigTableValueColumns: [3]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [3]
                       outputColumnNames: _col1
                       input vertices:
                         1 Map 3
@@ -383,84 +318,32 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: bigint)
                           1 _col0 (type: bigint)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [3]
-                            bigTableRetainedColumns: [3]
-                            bigTableValueColumns: [3]
-                            className: VectorMapJoinOuterLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [3]
                         outputColumnNames: _col1
                         input vertices:
                           1 Map 4
                         Statistics: Num rows: 24 Data size: 6336 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count(), sum(_col1)
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint, VectorUDAFSumLong(col 3) -> bigint
-                              className: VectorGroupByOperator
-                              vectorOutput: true
-                              native: false
-                              projectedOutputColumns: [0, 1]
                           mode: hash
                           outputColumnNames: _col0, _col1
                           Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                           Reduce Output Operator
                             sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkOperator
-                                native: false
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                                nativeConditionsNotMet: Uniform Hash IS false
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2, 3]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
             Local Work:
               Map Reduce Local Work
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint
-                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), sum(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint, VectorUDAFSumLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

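For reference, the one-line change repeated throughout these hunks swaps the extended EXPLAIN syntax back to plain EXPLAIN. The three extended forms that occur across these golden files are sketched below; the trailing SELECT is illustrative:

explain vectorization expression
select count(*) from alltypesorc;

explain vectorization detail
select count(*) from alltypesorc;

explain vectorization detail formatted
select count(*) from alltypesorc;
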
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
index fbd294e..c5a8de5 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -226,7 +226,7 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -235,7 +235,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -244,7 +244,117 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cstring1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col1 (type: string)
+                        1 _col0 (type: string)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int), cstring1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col1 (type: string)
+                          1 _col0 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1
@@ -270,7 +380,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
 #### A masked pattern was here ####
 20
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -279,7 +389,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -288,7 +398,117 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cstring2 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col1 (type: string)
+                        1 _col0 (type: string)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cstring1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: string)
+                        1 _col0 (type: string)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: string)
+                        1 _col0 (type: string)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1
@@ -314,7 +534,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
 #### A masked pattern was here ####
 28
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -323,7 +543,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -332,7 +552,117 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cbigint (type: bigint), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col1 (type: bigint), _col3 (type: string)
+                        1 _col0 (type: bigint), _col1 (type: string)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int), cstring1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int), _col2 (type: string)
+                        1 _col0 (type: int), _col1 (type: string)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: bigint), _col3 (type: string)
+                        1 _col0 (type: bigint), _col1 (type: string)
+                      outputColumnNames: _col0, _col2
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: int), _col2 (type: string)
+                          1 _col0 (type: int), _col1 (type: string)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.cstring1

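The Map Join Vectorization blocks deleted in the vector_outer_join2 hunks further up list the conditions under which the native vectorized map join (VectorMapJoinOuterLongOperator) is chosen, starting from a single session property. A hedged sketch (property name verbatim from the plan text; the single-join query is a simplified variant of the ones under test):

set hive.vectorized.execution.mapjoin.native.enabled=true;
explain
select count(*) from (select c.cstring1
from small_alltypesorc_a c
left outer join small_alltypesorc_a hd
  on hd.cstring1 = c.cstring1
) t1;
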
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
index b9b97f6..94860ab 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -246,19 +246,85 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select * 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
   on cd.cint = c.cint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select * 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
   on cd.cint = c.cint
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col2 (type: int)
+                        1 _col2 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col2 (type: int)
+                        1 _col2 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 33 Data size: 4825 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 33 Data size: 4825 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select * 
@@ -331,19 +397,85 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false	NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false	NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false	NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 33 Data size: 4825 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 33 Data size: 4825 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select c.ctinyint 
@@ -772,7 +904,7 @@ NULL
 NULL
 NULL
 NULL
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
@@ -781,7 +913,7 @@ left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
@@ -790,7 +922,117 @@ left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cd
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: hd
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 30 Data size: 4387 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 33 Data size: 4825 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: tinyint)
+                          1 _col0 (type: tinyint)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 36 Data size: 5307 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 select count(*) from (select c.ctinyint


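All of the vector_outer_join4 plans above share one shape: Stage-2 builds the small-table hash sinks (Spark HashTable Sink Operator), Stage-1 streams the big table through the chained Map Join Operators, and Stage-0 is the fetch. The simplest query exercising that shape, as shown in the file:

explain
select c.ctinyint
from small_alltypesorc_b c
left outer join small_alltypesorc_b hd
  on hd.ctinyint = c.ctinyint;
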
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
index 14606ed..6c6c6d6 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
@@ -94,16 +94,12 @@ POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@vsmb_bucket_txt
 POSTHOOK: Lineage: vsmb_bucket_txt.key SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: vsmb_bucket_txt.value SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -121,71 +117,33 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -229,16 +187,12 @@ POSTHOOK: Input: default@vsmb_bucket_2
 528534767	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
 528534767	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
 528534767	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -256,36 +210,17 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -302,10 +237,6 @@ STAGE PLANS:
                       value expressions: value (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.row.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.RCFileInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -354,7 +285,7 @@ PREHOOK: query: -- RC file does not yet provide the vectorized CommonRCFileforma
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 
-explain vectorization expression
+explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: -- RC file does not yet provide the vectorized CommonRCFileformat out-of-the-box
@@ -362,13 +293,9 @@ POSTHOOK: query: -- RC file does not yet provide the vectorized CommonRCFileform
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 -- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
 
-explain vectorization expression
+explain
 select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -386,36 +313,17 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -432,10 +340,6 @@ STAGE PLANS:
                       value expressions: value (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
index 127c2c3..6e13369 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select 
   csmallint,
   case 
@@ -16,7 +16,7 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select 
   csmallint,
   case 
@@ -34,10 +34,6 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -52,30 +48,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36700 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1, val 418) -> boolean, FilterLongColEqualLongScalar(col 1, val 12205) -> boolean, FilterLongColEqualLongScalar(col 1, val 10583) -> boolean) -> boolean
                     predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1, 14, 15]
-                          selectExpressions: VectorUDFAdaptor(CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END)(children: LongColEqualLongScalar(col 1, val 418) -> 12:long, LongColEqualLongScalar(col 1, val 12205) -> 13:long) -> 14:String, VectorUDFAdaptor(CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END)(children: LongColEqualLongScalar(col 1, val 418) -> 12:long, LongColEqualLongScalar(col 1, val 12205) -> 13:long) -> 15:String
                       Statistics: Num rows: 6 Data size: 2228 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 2228 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -83,14 +64,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -140,7 +113,7 @@ POSTHOOK: Input: default@alltypesorc
 10583	c	c
 418	a	a
 12205	b	b
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select 
   csmallint,
   case 
@@ -158,7 +131,7 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select 
   csmallint,
   case 
@@ -176,10 +149,6 @@ where csmallint = 418
 or csmallint = 12205
 or csmallint = 10583
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -194,30 +163,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36700 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1, val 418) -> boolean, FilterLongColEqualLongScalar(col 1, val 12205) -> boolean, FilterLongColEqualLongScalar(col 1, val 10583) -> boolean) -> boolean
                     predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN (null) ELSE ('c') END (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1, 14, 15]
-                          selectExpressions: VectorUDFAdaptor(CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END)(children: LongColEqualLongScalar(col 1, val 418) -> 12:long, LongColEqualLongScalar(col 1, val 12205) -> 13:long) -> 14:String, VectorUDFAdaptor(CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN (null) ELSE ('c') END)(children: LongColEqualLongScalar(col 1, val 418) -> 12:long, LongColEqualLongScalar(col 1, val 12205) -> 13:long) -> 15:String
                       Statistics: Num rows: 6 Data size: 2228 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 2228 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -225,14 +179,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
index 0fb8552..a95702d 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
@@ -3,7 +3,7 @@ PREHOOK: query: -- SORT_QUERY_RESULTS
 -- Currently, vectorization is not supported in fetch task (hive.fetch.task.conversion=none)
 -- Test type casting in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
 -- to boolean
    cast (ctinyint as boolean)
@@ -82,7 +82,7 @@ POSTHOOK: query: -- SORT_QUERY_RESULTS
 -- Currently, vectorization is not supported in fetch task (hive.fetch.task.conversion=none)
 -- Test type casting in vectorized mode to verify end-to-end functionality.
 
-explain vectorization 
+explain 
 select 
 -- to boolean
    cast (ctinyint as boolean)
@@ -156,10 +156,6 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -190,14 +186,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_context.q.out b/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
index 855a50f..1f70a01 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
@@ -82,24 +82,20 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@household_demographics
 POSTHOOK: Lineage: household_demographics.hd_demo_sk SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-PREHOOK: query: explain vectorization 
+PREHOOK: query: explain 
 select store.s_city, ss_net_profit
 from store_sales
 JOIN store ON store_sales.ss_store_sk = store.s_store_sk
 JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 limit 100
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization 
+POSTHOOK: query: explain 
 select store.s_city, ss_net_profit
 from store_sales
 JOIN store ON store_sales.ss_store_sk = store.s_store_sk
 JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
 limit 100
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,14 +128,6 @@ STAGE PLANS:
                         value expressions: _col1 (type: int), _col2 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
@@ -188,14 +176,6 @@ STAGE PLANS:
                                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -215,14 +195,6 @@ STAGE PLANS:
                         Statistics: Num rows: 6075 Data size: 24300 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
index e2999a5..f45e730 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
@@ -203,7 +203,7 @@ POSTHOOK: Input: default@date_udf_flight_orc
 2010-10-31	2010-10-31 07:00:00
 2010-10-31	2010-10-31 07:00:00
 2010-10-31	2010-10-31 07:00:00
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(fl_time),
   year(fl_time),
   month(fl_time),
@@ -218,7 +218,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_time, "2000-01-01")
 FROM date_udf_flight_orc
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(fl_time),
   year(fl_time),
   month(fl_time),
@@ -233,62 +233,20 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_time, "2000-01-01")
 FROM date_udf_flight_orc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: date_udf_flight_orc
-                  Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                        selectExpressions: VectorUDFUnixTimeStampTimestamp(col 1) -> 2:long, VectorUDFYearTimestamp(col 1, field YEAR) -> 3:long, VectorUDFMonthTimestamp(col 1, field MONTH) -> 4:long, VectorUDFDayOfMonthTimestamp(col 1, field DAY_OF_MONTH) -> 5:long, VectorUDFDayOfMonthTimestamp(col 1, field DAY_OF_MONTH) -> 6:long, VectorUDFDayOfWeekTimestamp(col 1, field DAY_OF_WEEK) -> 7:long, VectorUDFWeekOfYearTimestamp(col 1, field WEEK_OF_YEAR) -> 8:long, CastTimestampToDate(col 1) -> 9:date, VectorUDFDateTimestamp(col 1) -> 10:date, VectorUDFDateAddColScalar(col 1, val 2) -> 11:date, VectorUDFDateSubColScalar(col 1, val 2) -> 12:date, VectorUDFDateDiffColScalar(col 1, val 2000-01-01) -> 13:long
-                    Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: date_udf_flight_orc
+          Select Operator
+            expressions: to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+            ListSink
 
 PREHOOK: query: SELECT
   to_unix_timestamp(fl_time),
@@ -461,7 +419,7 @@ POSTHOOK: Input: default@date_udf_flight_orc
 1288533600	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
 1288533600	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
 1288533600	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(fl_date),
   year(fl_date),
   month(fl_date),
@@ -476,7 +434,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(fl_date),
   year(fl_date),
   month(fl_date),
@@ -491,62 +449,20 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: date_udf_flight_orc
-                  Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 0, 9, 10, 11, 12]
-                        selectExpressions: VectorUDFUnixTimeStampDate(col 0) -> 2:long, VectorUDFYearDate(col 0, field YEAR) -> 3:long, VectorUDFMonthDate(col 0, field MONTH) -> 4:long, VectorUDFDayOfMonthDate(col 0, field DAY_OF_MONTH) -> 5:long, VectorUDFDayOfMonthDate(col 0, field DAY_OF_MONTH) -> 6:long, VectorUDFDayOfWeekDate(col 0, field DAY_OF_WEEK) -> 7:long, VectorUDFWeekOfYearDate(col 0, field WEEK_OF_YEAR) -> 8:long, VectorUDFDateLong(col 0) -> 9:date, VectorUDFDateAddColScalar(col 0, val 2) -> 10:date, VectorUDFDateSubColScalar(col 0, val 2) -> 11:date, VectorUDFDateDiffColScalar(col 0, val 2000-01-01) -> 12:long
-                    Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: date_udf_flight_orc
+          Select Operator
+            expressions: to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+            ListSink
 
 PREHOOK: query: SELECT
   to_unix_timestamp(fl_date),
@@ -719,7 +635,7 @@ POSTHOOK: Input: default@date_udf_flight_orc
 1288508400	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
 1288508400	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
 1288508400	2010	10	31	31	1	43	2010-10-31	2010-10-31	2010-11-02	2010-10-29	3956
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   year(fl_time) = year(fl_date),
   month(fl_time) = month(fl_date),
   day(fl_time) = day(fl_date),
@@ -733,7 +649,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   year(fl_time) = year(fl_date),
   month(fl_time) = month(fl_date),
   day(fl_time) = day(fl_date),
@@ -747,62 +663,20 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01")
 FROM date_udf_flight_orc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: date_udf_flight_orc
-                  Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4, 5, 6, 7, 8, 9, 2, 3, 12, 13, 16]
-                        selectExpressions: LongColEqualLongColumn(col 2, col 3)(children: VectorUDFYearTimestamp(col 1, field YEAR) -> 2:long, VectorUDFYearDate(col 0, field YEAR) -> 3:long) -> 4:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFMonthTimestamp(col 1, field MONTH) -> 2:long, VectorUDFMonthDate(col 0, field MONTH) -> 3:long) -> 5:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFDayOfMonthTimestamp(col 1, field DAY_OF_MONTH) -> 2:long, VectorUDFDayOfMonthDate(col 0, field DAY_OF_MONTH) -> 3:long) -> 6:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFDayOfMonthTimestamp(col 1, field DAY_OF_MONTH) -> 2:long, VectorUDFDayOfMonthDate(col 0, field DAY_OF_MONTH) -> 3:long) -> 7:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFDayOfWeekTimestamp(col 1, field DAY_OF_WEEK) -> 2:long, VectorUDFDayOfWeekDate(col 0, field DAY_OF_WEEK) -> 3:long) -> 8:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFWeekOfYearTimestamp(col 1, field WEEK_OF_YEAR) -> 2:long, VectorUDFWeekOfYearDate(col 0, field WEEK_OF_YEAR) -> 3:long) -> 9:long, LongColEqualLongColumn(col 10, col 0)(children: CastTimestampToDate(col 1) -> 10:date) -> 2:long, LongColEqualLongColumn(col 10, col 11)(children: VectorUDFDateTimestamp(col 1) -> 10:date, VectorUDFDateLong(col 0) -> 11:date) -> 3:long, LongColEqualLongColumn(col 10, col 11)(children: VectorUDFDateAddColScalar(col 1, val 2) -> 10:date, VectorUDFDateAddColScalar(col 0, val 2) -> 11:date) -> 12:long, LongColEqualLongColumn(col 10, col 11)(children: VectorUDFDateSubColScalar(col 1, val 2) -> 10:date, VectorUDFDateSubColScalar(col 0, val 2) -> 11:date) -> 13:long, LongColEqualLongColumn(col 14, col 15)(children: VectorUDFDateDiffColScalar(col 1, val 2000-01-01) -> 14:long, VectorUDFDateDiffColScalar(col 0, val 2000-01-01) -> 15:long) -> 16:long
-                    Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: date_udf_flight_orc
+          Select Operator
+            expressions: (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+            ListSink
 
 PREHOOK: query: -- Should all be true or NULL
 SELECT
@@ -975,7 +849,7 @@ true	true	true	true	true	true	true	true	true	true	true
 true	true	true	true	true	true	true	true	true	true	true
 true	true	true	true	true	true	true	true	true	true	true
 true	true	true	true	true	true	true	true	true	true	true
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT 
+PREHOOK: query: EXPLAIN SELECT 
   fl_date, 
   to_date(date_add(fl_date, 2)), 
   to_date(date_sub(fl_date, 2)),
@@ -984,7 +858,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) 
 FROM date_udf_flight_orc LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT 
+POSTHOOK: query: EXPLAIN SELECT 
   fl_date, 
   to_date(date_add(fl_date, 2)), 
   to_date(date_sub(fl_date, 2)),
@@ -993,68 +867,22 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) 
 FROM date_udf_flight_orc LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: date_udf_flight_orc
-                  Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: fl_date (type: date), to_date(date_add(fl_date, 2)) (type: date), to_date(date_sub(fl_date, 2)) (type: date), datediff(fl_date, date_add(fl_date, 2)) (type: int), datediff(fl_date, date_sub(fl_date, 2)) (type: int), datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 3, 4, 5, 6, 8]
-                        selectExpressions: VectorUDFDateLong(col 2)(children: VectorUDFDateAddColScalar(col 0, val 2) -> 2:date) -> 3:date, VectorUDFDateLong(col 2)(children: VectorUDFDateSubColScalar(col 0, val 2) -> 2:date) -> 4:date, VectorUDFDateDiffColCol(col 0, col 2)(children: VectorUDFDateAddColScalar(col 0, val 2) -> 2:date) -> 5:long, VectorUDFDateDiffColCol(col 0, col 2)(children: VectorUDFDateSubColScalar(col 0, val 2) -> 2:date) -> 6:long, VectorUDFDateDiffColCol(col 2, col 7)(children: VectorUDFDateAddColScalar(col 0, val 2) -> 2:date, VectorUDFDateSubColScalar(col 0, val 2) -> 7:date) -> 8:long
-                    Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                    Limit
-                      Number of rows: 10
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 10 Data size: 960 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 10 Data size: 960 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: date_udf_flight_orc
+          Select Operator
+            expressions: fl_date (type: date), to_date(date_add(fl_date, 2)) (type: date), to_date(date_sub(fl_date, 2)) (type: date), datediff(fl_date, date_add(fl_date, 2)) (type: int), datediff(fl_date, date_sub(fl_date, 2)) (type: int), datediff(date_add(fl_date, 2), date_sub(fl_date, 2)) (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+            Limit
+              Number of rows: 10
+              ListSink
 
 PREHOOK: query: SELECT 
   fl_date, 
@@ -1099,7 +927,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_udf_flight_orc
 #### A masked pattern was here ####
 2009-07-30
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   min(fl_date) AS c1,
   max(fl_date),
   count(fl_date),
@@ -1107,7 +935,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM date_udf_flight_orc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   min(fl_date) AS c1,
   max(fl_date),
   count(fl_date),
@@ -1115,10 +943,6 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM date_udf_flight_orc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1137,101 +961,43 @@ STAGE PLANS:
                 TableScan
                   alias: date_udf_flight_orc
                   Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: fl_date (type: date)
                     outputColumnNames: fl_date
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(fl_date), max(fl_date), count(fl_date), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 0) -> date, VectorUDAFMaxLong(col 0) -> date, VectorUDAFCount(col 0) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 0) -> date, VectorUDAFMaxLong(col 1) -> date, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: date)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
index 0b5d516..ced9795 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
@@ -16,14 +16,10 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dtest
 POSTHOOK: Lineage: dtest.a SCRIPT []
 POSTHOOK: Lineage: dtest.b SIMPLE []
-PREHOOK: query: explain vectorization select sum(distinct a), count(distinct a) from dtest
+PREHOOK: query: explain select sum(distinct a), count(distinct a) from dtest
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select sum(distinct a), count(distinct a) from dtest
+POSTHOOK: query: explain select sum(distinct a), count(distinct a) from dtest
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -61,23 +57,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0), count(VALUE._col1)
@@ -107,14 +88,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dtest
 #### A masked pattern was here ####
 300	1
-PREHOOK: query: explain vectorization select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
+PREHOOK: query: explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
+POSTHOOK: query: explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -149,23 +126,8 @@ STAGE PLANS:
                         Statistics: Num rows: 5775 Data size: 17248 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: false
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: int)
@@ -183,11 +145,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: struct<count:bigint,sum:double,variance:double>)
         Reducer 3 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col2] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2), std(VALUE._col3)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
index 0bab7bd..4c252c7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select b from vectortab2korc order by b
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select b from vectortab2korc order by b
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,59 +128,25 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: b (type: bigint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
index 30c4f8e..a4ce890 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s, i, s2 from vectortab2korc order by s, i, s2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s, i, s2 from vectortab2korc order by s, i, s2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,59 +128,25 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: s (type: string), i (type: int), s2 (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [8, 2, 9]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                       sort order: +++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

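The Reduce Vectorization block removed from vector_reduce2.q.out above records the two gates for reduce-side vectorization: hive.vectorized.execution.reduce.enabled and an execution engine of tez or spark. The removed Reduce Sink block likewise lists why the sink is not native (Uniform Hash IS false). A sketch of how one might regenerate that annotated plan for the same query, with both switches named in the removed condition lists set explicitly (session defaults may already cover them):

    set hive.vectorized.execution.reduce.enabled=true;
    set hive.vectorized.execution.reducesink.new.enabled=true;

    EXPLAIN VECTORIZATION EXPRESSION
    select s, i, s2 from vectortab2korc order by s, i, s2;
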
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
index 21b9844..d34113c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s from vectortab2korc order by s
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s from vectortab2korc order by s
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,59 +128,25 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: s (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [8]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

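vector_reduce3.q.out above shows the map-side counterpart: the removed Map Vectorization block ties vectorization of the scan to a vectorized input format, with OrcInputFormat satisfying "hive.vectorized.use.vectorized.input.format IS true". A sketch, using hypothetical table names t_text and t_orc, of why these tests materialize an ORC copy (vectortab2korc, loaded from vectortab2k per the lineage lines) before explaining the query:

    -- a text-backed table would not satisfy the vectorized-input-format
    -- condition, so stage the data into ORC first
    CREATE TABLE t_orc STORED AS ORC AS SELECT * FROM t_text;

    EXPLAIN VECTORIZATION EXPRESSION
    SELECT s FROM t_orc ORDER BY s;
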
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
index 09efd32..9571b5b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
@@ -14,24 +14,20 @@ POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.F
 POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
 WHERE cdecimal1 is not null and cdecimal2 is not null
 GROUP BY cint, cdouble, cdecimal1, cdecimal2
 ORDER BY cint, cdouble, cdecimal1, cdecimal2
 LIMIT 50
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
 WHERE cdecimal1 is not null and cdecimal2 is not null
 GROUP BY cint, cdouble, cdecimal1, cdecimal2
 ORDER BY cint, cdouble, cdecimal1, cdecimal2
 LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -50,25 +46,11 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_test
                   Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 4)(children: CastDecimalToBoolean(col 2) -> 4:long) -> boolean, SelectColumnIsNotNull(col 4)(children: CastDecimalToBoolean(col 3) -> 4:long) -> boolean) -> boolean
                     predicate: (cdecimal1 is not null and cdecimal2 is not null) (type: boolean)
                     Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(cdecimal1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinDecimal(col 2) -> decimal(20,10)
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 1, col 2, col 3
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
@@ -77,43 +59,16 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                         sort order: ++++
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
                         Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col4 (type: decimal(20,10))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinDecimal(col 4) -> decimal(20,10)
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1, col 2, col 3
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: int), KEY._col1 (type: double), KEY._col2 (type: decimal(20,10)), KEY._col3 (type: decimal(23,14))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
@@ -121,43 +76,21 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                   sort order: ++++
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                   Statistics: Num rows: 3051 Data size: 720036 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col4 (type: decimal(20,10))
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: decimal(20,10)), KEY.reducesinkkey3 (type: decimal(23,14)), VALUE._col0 (type: decimal(20,10))
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4]
                 Statistics: Num rows: 3051 Data size: 720036 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 50
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 50 Data size: 11800 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 11800 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

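In vector_reduce_groupby_decimal.q.out the removed annotations are the most detailed of this batch: the min(cdecimal1) aggregate maps to VectorUDAFMinDecimal, and the IS NOT NULL filter compiles to FilterExprAndExpr over SelectColumnIsNotNull/CastDecimalToBoolean children. For reference, the query the test runs to produce that plan:

    EXPLAIN VECTORIZATION EXPRESSION
    SELECT cint, cdouble, cdecimal1, cdecimal2,
           min(cdecimal1) as min_decimal1
    FROM decimal_test
    WHERE cdecimal1 is not null and cdecimal2 is not null
    GROUP BY cint, cdouble, cdecimal1, cdecimal2
    ORDER BY cint, cdouble, cdecimal1, cdecimal2
    LIMIT 50;
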
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
index 4927f17..ca3d2fa 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
@@ -97,78 +97,32 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+PREHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
        CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+POSTHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
        CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
-                  Select Operator
-                    expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [7, 12, 11]
-                        selectExpressions: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 12:String_Family, StringGroupColConcatStringScalar(col 13, val |)(children: StringScalarConcatStringGroupCol(val |, col 11)(children: StringRTrim(col 13)(children: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 13:String_Family) -> 11:String) -> 13:String_Family) -> 11:String_Family
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Limit
-                      Number of rows: 20
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 20
       Processor Tree:
-        ListSink
+        TableScan
+          alias: over1korc
+          Select Operator
+            expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Limit
+              Number of rows: 20
+              ListSink
 
 PREHOOK: query: SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
@@ -311,24 +265,20 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -347,25 +297,11 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [19]
-                        selectExpressions: StringGroupConcatColCol(col 17, col 18)(children: StringGroupColConcatStringScalar(col 18, val -)(children: StringScalarConcatStringGroupCol(val Quarter , col 17)(children: CastLongToString(col 13)(children: CastDoubleToLong(col 15)(children: DoubleColAddDoubleScalar(col 16, val 1.0)(children: DoubleColDivideDoubleScalar(col 15, val 3.0)(children: CastLongToDouble(col 14)(children: LongColSubtractLongScalar(col 13, val 1)(children: VectorUDFMonthDate(col 12, field MONTH) -> 13:long) -> 14:long) -> 15:double) -> 16:double) -> 15:double) -> 13:long) -> 17:String) -> 18:String_Family) -> 17:String_Family, CastLongToString(col 13)(children: VectorUDFYearDate(col 12, field YEAR) -> 13:long) -> 18:String) -> 19:String_Family
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 19
-                          native: false
-                          projectedOutputColumns: []
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0
@@ -374,40 +310,14 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
@@ -415,42 +325,20 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 50
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

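vector_string_concat.q.out above changes more than annotations: in its first hunk the regenerated golden output has no Tez stage at all, Stage-0 becoming the root with a TableScan/Select/Limit/ListSink processor tree, i.e. the simple LIMIT query now runs as a fetch task. Presumably the files were regenerated under a configuration where fetch-task conversion applies to SELECT-plus-LIMIT queries; a hedged sketch of that setting (an assumption here, not something stated in this commit):

    -- "more" extends fetch-task conversion to SELECT, FILTER, LIMIT
    set hive.fetch.task.conversion=more;

    EXPLAIN SELECT s FROM over1korc LIMIT 20;
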
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
index dedc4e6..0aa1e70 100644
--- a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
@@ -22,7 +22,7 @@ POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@test_1
 POSTHOOK: Lineage: test_1.id SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: test_1.lineid SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from test_1 where struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -36,7 +36,7 @@ struct('nine','1'),
 struct('ten','1')
 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from test_1 where struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -50,68 +50,15 @@ struct('nine','1'),
 struct('ten','1')
 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_1
-                  Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterStructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [BYTES, BYTES], structColumnMap [0, 1]) -> boolean
-                    predicate: (struct(id,lineid)) IN (const struct('two','3'), const struct('three','1'), const struct('one','1'), const struct('five','2'), const struct('six','1'), const struct('eight','1'), const struct('seven','1'), const struct('nine','1'), const struct('ten','1')) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 173 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: id (type: string), lineid (type: string)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
-                      Statistics: Num rows: 1 Data size: 173 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 173 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_2]
+      Output:["_col0","_col1"]
+      Filter Operator [FIL_4]
+        predicate:(struct(id,lineid)) IN (const struct('two','3'), const struct('three','1'), const struct('one','1'), const struct('five','2'), const struct('six','1'), const struct('eight','1'), const struct('seven','1'), const struct('nine','1'), const struct('ten','1'))
+        TableScan [TS_0]
+          Output:["id","lineid"]
 
 PREHOOK: query: select * from test_1 where struct(`id`, `lineid`)
 IN (
@@ -145,7 +92,7 @@ POSTHOOK: Input: default@test_1
 #### A masked pattern was here ####
 one	1
 seven	1
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -159,7 +106,7 @@ struct('nine','1'),
 struct('ten','1')
 ) as b from test_1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two','3'),
@@ -173,62 +120,13 @@ struct('nine','1'),
 struct('ten','1')
 ) as b from test_1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_1
-                  Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: id (type: string), lineid (type: string), (struct(id,lineid)) IN (const struct('two','3'), const struct('three','1'), const struct('one','1'), const struct('five','2'), const struct('six','1'), const struct('eight','1'), const struct('seven','1'), const struct('nine','1'), const struct('ten','1')) (type: boolean)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 3]
-                        selectExpressions: StructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [BYTES, BYTES], structColumnMap [0, 1]) -> 3:boolean
-                    Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0","_col1","_col2"]
+      TableScan [TS_0]
+        Output:["id","lineid"]
 
 PREHOOK: query: select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
@@ -282,7 +180,7 @@ POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@test_2
 POSTHOOK: Lineage: test_2.id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: test_2.lineid EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from test_2 where struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -296,7 +194,7 @@ struct(9,1),
 struct(10,1)
 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from test_2 where struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -310,68 +208,15 @@ struct(9,1),
 struct(10,1)
 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_2
-                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterStructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [LONG, LONG], structColumnMap [0, 1]) -> boolean
-                    predicate: (struct(id,lineid)) IN (const struct(2,3), const struct(3,1), const struct(1,1), const struct(5,2), const struct(6,1), const struct(8,1), const struct(7,1), const struct(9,1), const struct(10,1)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: id (type: int), lineid (type: int)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_2]
+      Output:["_col0","_col1"]
+      Filter Operator [FIL_4]
+        predicate:(struct(id,lineid)) IN (const struct(2,3), const struct(3,1), const struct(1,1), const struct(5,2), const struct(6,1), const struct(8,1), const struct(7,1), const struct(9,1), const struct(10,1))
+        TableScan [TS_0]
+          Output:["id","lineid"]
 
 PREHOOK: query: select * from test_2 where struct(`id`, `lineid`)
 IN (
@@ -405,7 +250,7 @@ POSTHOOK: Input: default@test_2
 #### A masked pattern was here ####
 1	1
 7	1
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -419,7 +264,7 @@ struct(9,1),
 struct(10,1)
 ) as b from test_2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct(2,3),
@@ -433,62 +278,13 @@ struct(9,1),
 struct(10,1)
 ) as b from test_2
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_2
-                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: id (type: int), lineid (type: int), (struct(id,lineid)) IN (const struct(2,3), const struct(3,1), const struct(1,1), const struct(5,2), const struct(6,1), const struct(8,1), const struct(7,1), const struct(9,1), const struct(10,1)) (type: boolean)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 3]
-                        selectExpressions: StructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [LONG, LONG], structColumnMap [0, 1]) -> 3:boolean
-                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0","_col1","_col2"]
+      TableScan [TS_0]
+        Output:["id","lineid"]
 
 PREHOOK: query: select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
@@ -542,7 +338,7 @@ POSTHOOK: Input: default@values__tmp__table__3
 POSTHOOK: Output: default@test_3
 POSTHOOK: Lineage: test_3.id SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: test_3.lineid EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from test_3 where struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -556,7 +352,7 @@ struct('nine',1),
 struct('ten',1)
 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from test_3 where struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -570,68 +366,15 @@ struct('nine',1),
 struct('ten',1)
 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_3
-                  Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterStructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [BYTES, LONG], structColumnMap [0, 1]) -> boolean
-                    predicate: (struct(id,lineid)) IN (const struct('two',3), const struct('three',1), const struct('one',1), const struct('five',2), const struct('six',1), const struct('eight',1), const struct('seven',1), const struct('nine',1), const struct('ten',1)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: id (type: string), lineid (type: int)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
-                      Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_2]
+      Output:["_col0","_col1"]
+      Filter Operator [FIL_4]
+        predicate:(struct(id,lineid)) IN (const struct('two',3), const struct('three',1), const struct('one',1), const struct('five',2), const struct('six',1), const struct('eight',1), const struct('seven',1), const struct('nine',1), const struct('ten',1))
+        TableScan [TS_0]
+          Output:["id","lineid"]
 
 PREHOOK: query: select * from test_3 where struct(`id`, `lineid`)
 IN (
@@ -665,7 +408,7 @@ POSTHOOK: Input: default@test_3
 #### A masked pattern was here ####
 one	1
 seven	1
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -679,7 +422,7 @@ struct('nine',1),
 struct('ten',1)
 ) as b from test_3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
 struct('two',3),
@@ -693,62 +436,13 @@ struct('nine',1),
 struct('ten',1)
 ) as b from test_3
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_3
-                  Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
-                  Select Operator
-                    expressions: id (type: string), lineid (type: int), (struct(id,lineid)) IN (const struct('two',3), const struct('three',1), const struct('one',1), const struct('five',2), const struct('six',1), const struct('eight',1), const struct('seven',1), const struct('nine',1), const struct('ten',1)) (type: boolean)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 3]
-                        selectExpressions: StructColumnInList(structExpressions [col 0, col 1], fieldVectorColumnTypes [BYTES, LONG], structColumnMap [0, 1]) -> 3:boolean
-                    Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0","_col1","_col2"]
+      TableScan [TS_0]
+        Output:["id","lineid"]
 
 PREHOOK: query: select `id`, `lineid`, struct(`id`, `lineid`)
 IN (
@@ -803,7 +497,7 @@ POSTHOOK: Output: default@test_4
 POSTHOOK: Lineage: test_4.my_bigint EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: test_4.my_double EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
 POSTHOOK: Lineage: test_4.my_string SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select * from test_4 where struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
@@ -818,7 +512,7 @@ struct(1L, "a", 0.5D),
 struct(3L, "b", 1.5D)
 )
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select * from test_4 where struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
@@ -833,68 +527,15 @@ struct(1L, "a", 0.5D),
 struct(3L, "b", 1.5D)
 )
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_4
-                  Statistics: Num rows: 3 Data size: 303 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterStructColumnInList(structExpressions [col 0, col 1, col 2], fieldVectorColumnTypes [LONG, BYTES, DOUBLE], structColumnMap [0, 1, 2]) -> boolean
-                    predicate: (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double)
-                      outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1, 2]
-                      Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_2]
+      Output:["_col0","_col1","_col2"]
+      Filter Operator [FIL_4]
+        predicate:(struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5))
+        TableScan [TS_0]
+          Output:["my_bigint","my_string","my_double"]
 
 PREHOOK: query: select * from test_4 where struct(`my_bigint`, `my_string`, `my_double`)
 IN (
@@ -929,7 +570,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_4
 #### A masked pattern was here ####
 1	a	0.5
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select `my_bigint`, `my_string`, `my_double`, struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
@@ -944,7 +585,7 @@ struct(1L, "a", 0.5D),
 struct(3L, "b", 1.5D)
 ) as b from test_4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select `my_bigint`, `my_string`, `my_double`, struct(`my_bigint`, `my_string`, `my_double`)
 IN (
 struct(1L, "a", 1.5D),
@@ -959,62 +600,13 @@ struct(1L, "a", 0.5D),
 struct(3L, "b", 1.5D)
 ) as b from test_4
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: test_4
-                  Statistics: Num rows: 3 Data size: 303 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2]
-                  Select Operator
-                    expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double), (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 4]
-                        selectExpressions: StructColumnInList(structExpressions [col 0, col 1, col 2], fieldVectorColumnTypes [LONG, BYTES, DOUBLE], structColumnMap [0, 1, 2]) -> 4:boolean
-                    Statistics: Num rows: 3 Data size: 303 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 3 Data size: 303 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Select Operator [SEL_1]
+      Output:["_col0","_col1","_col2","_col3"]
+      TableScan [TS_0]
+        Output:["my_bigint","my_string","my_double"]
 
 PREHOOK: query: select `my_bigint`, `my_string`, `my_double`, struct(`my_bigint`, `my_string`, `my_double`)
 IN (

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
index 5979f8b..7d14256 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
@@ -121,16 +121,12 @@ POSTHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi v
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_lazy_binary_columnar
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -147,23 +143,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: CAST( t AS varchar(10)) (type: varchar(10)), CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), CAST( s AS varchar(50)) (type: varchar(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19]
-                        selectExpressions: CastLongToVarChar(col 0, maxLength 10) -> 13:VarChar, CastLongToVarChar(col 1, maxLength 10) -> 14:VarChar, CastLongToVarChar(col 2, maxLength 20) -> 15:VarChar, CastLongToVarChar(col 3, maxLength 30) -> 16:VarChar, VectorUDFAdaptor(CAST( f AS varchar(20))) -> 17:varchar(20), VectorUDFAdaptor(CAST( d AS varchar(20))) -> 18:varchar(20), CastStringGroupToVarChar(col 8, maxLength 50) -> 19:VarChar
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -172,14 +157,6 @@ STAGE PLANS:
                           name: default.varchar_lazy_binary_columnar
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-2
     Dependency Collection

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
index 09259c8..9c2c536 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
@@ -125,15 +125,11 @@ POSTHOOK: Output: default@varchar_join1_str_orc
 POSTHOOK: Lineage: varchar_join1_str_orc.c1 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
 POSTHOOK: Lineage: varchar_join1_str_orc.c2 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join varchar with same length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join varchar with same length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -176,14 +172,6 @@ STAGE PLANS:
                           value expressions: _col1 (type: varchar(10)), _col2 (type: int), _col3 (type: varchar(10))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -204,23 +192,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(10)), VALUE._col1 (type: int), VALUE._col2 (type: varchar(10))
@@ -252,15 +225,11 @@ POSTHOOK: Input: default@varchar_join1_vc1_orc
 2	abc 	2	abc 
 3	 abc	3	 abc
 PREHOOK: query: -- Join varchar with different length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join varchar with different length varchar
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -303,14 +272,6 @@ STAGE PLANS:
                           value expressions: _col1 (type: varchar(10)), _col2 (type: int), _col3 (type: varchar(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -331,23 +292,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(10)), VALUE._col1 (type: int), VALUE._col2 (type: varchar(20))
@@ -381,15 +327,11 @@ POSTHOOK: Input: default@varchar_join1_vc2_orc
 2	abc 	2	abc 
 3	 abc	3	 abc
 PREHOOK: query: -- Join varchar with string
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join varchar with string
-explain vectorization select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -432,14 +374,6 @@ STAGE PLANS:
                           value expressions: _col1 (type: varchar(10)), _col2 (type: int), _col3 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -460,23 +394,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(10)), VALUE._col1 (type: int), VALUE._col2 (type: string)


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
index ce05391..a510e38 100644
--- a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
@@ -97,18 +97,14 @@ POSTHOOK: Lineage: hundredorc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:
 POSTHOOK: Lineage: hundredorc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: hundredorc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: hundredorc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN 
 SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.bin = t2.bin
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN 
 SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.bin = t2.bin
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -159,12 +155,6 @@ STAGE PLANS:
                               value expressions: _col0 (type: bigint)
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Predicate expression for FILTER operator: org.apache.hadoop.hive.ql.metadata.HiveException: No vector type for SelectColumnIsNotNull argument #0 type name Binary
-                vectorized: false
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -185,38 +175,16 @@ STAGE PLANS:
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2))
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Predicate expression for FILTER operator: org.apache.hadoop.hive.ql.metadata.HiveException: No vector type for SelectColumnIsNotNull argument #0 type name Binary
-                vectorized: false
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -240,20 +208,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@hundredorc
 #### A masked pattern was here ####
 -27832781952
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN 
 SELECT count(*), bin
 FROM hundredorc
 GROUP BY bin
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN 
 SELECT count(*), bin
 FROM hundredorc
 GROUP BY bin
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -271,26 +235,12 @@ STAGE PLANS:
                 TableScan
                   alias: hundredorc
                   Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Select Operator
                     expressions: bin (type: binary)
                     outputColumnNames: bin
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [10]
                     Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 10
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: bin (type: binary)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -299,41 +249,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: binary)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: binary)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: binary)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -341,16 +265,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: bigint), _col0 (type: binary)
                   outputColumnNames: _col0, _col1
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0]
                   Statistics: Num rows: 50 Data size: 14819 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 14819 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -403,20 +320,16 @@ POSTHOOK: Input: default@hundredorc
 3	zync studies
 PREHOOK: query: -- HIVE-14045: Involve a binary vector scratch column for small table result (Native Vector MapJoin).
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT t1.i, t1.bin, t2.bin
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.i = t2.i
 PREHOOK: type: QUERY
 POSTHOOK: query: -- HIVE-14045: Involve a binary vector scratch column for small table result (Native Vector MapJoin).
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT t1.i, t1.bin, t2.bin
 FROM hundredorc t1 JOIN hundredorc t2 ON t1.i = t2.i
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -434,23 +347,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: i is not null (type: boolean)
                     Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: i (type: int), bin (type: binary)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 10]
                       Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -458,10 +360,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1, _col3
                         input vertices:
                           1 Map 2
@@ -469,16 +367,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: int), _col1 (type: binary), _col3 (type: binary)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 10, 11]
                           Statistics: Num rows: 110 Data size: 32601 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            File Sink Vectorization:
-                                className: VectorFileSinkOperator
-                                native: false
                             Statistics: Num rows: 110 Data size: 32601 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -486,57 +377,26 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: i is not null (type: boolean)
                     Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: i (type: int), bin (type: binary)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 10]
                       Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: binary)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_bround.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bround.q.out b/ql/src/test/results/clientpositive/llap/vector_bround.q.out
index 6adec76..05fac27 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bround.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bround.q.out
@@ -34,22 +34,19 @@ POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@test_vector_bround
 POSTHOOK: Lineage: test_vector_bround.v0 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: test_vector_bround.v1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-PREHOOK: query: explain vectorization select bround(v0), bround(v1, 1) from test_vector_bround
+PREHOOK: query: explain select bround(v0), bround(v1, 1) from test_vector_bround
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select bround(v0), bround(v1, 1) from test_vector_bround
+POSTHOOK: query: explain select bround(v0), bround(v1, 1) from test_vector_bround
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-0
   Fetch Operator
     limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=8 width=16)
-          Output:["_col0","_col1"]
-          TableScan [TS_0] (rows=8 width=16)
-            default@test_vector_bround,test_vector_bround,Tbl:COMPLETE,Col:NONE,Output:["v0","v1"]
+    Select Operator [SEL_1]
+      Output:["_col0","_col1"]
+      TableScan [TS_0]
+        Output:["v0","v1"]
 
 PREHOOK: query: select bround(v0), bround(v1, 1) from test_vector_bround
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
index c2af524..814ac75 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
@@ -6,16 +6,12 @@ POSTHOOK: query: CREATE TABLE non_orc_table(a INT, b STRING) CLUSTERED BY(a) INT
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@non_orc_table
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert into table non_orc_table values(1, 'one'),(1, 'one'), (2, 'two'),(3, 'three')
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert into table non_orc_table values(1, 'one'),(1, 'one'), (2, 'two'),(3, 'three')
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -46,34 +42,15 @@ STAGE PLANS:
                       value expressions: _col0 (type: string), _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [2, 1]
-                    selectExpressions: VectorUDFAdaptor(UDFToInteger(VALUE._col0)) -> 2:Long
                 Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
index 14a10fc..cd67e7e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -97,24 +97,20 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT 
+PREHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
   FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT 
+POSTHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
   FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -133,27 +129,12 @@ STAGE PLANS:
                 TableScan
                   alias: over1korc
                   Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                   Select Operator
                     expressions: i (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: avg(50), avg(50.0), avg(50)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 11:long) -> struct<count:bigint,sum:double>, VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 12:double) -> struct<count:bigint,sum:double>, VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 13:decimal(10,0)) -> struct<count:bigint,sum:decimal(20,0)>
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          keyExpressions: col 2
-                          native: false
-                          projectedOutputColumns: [0, 1, 2]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 11:long) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 12:double) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 13:decimal(10,0)) -> struct<count:bigint,sum:decimal(20,0)> output type STRUCT requires PRIMITIVE IS false
                       keys: _col0 (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
@@ -167,21 +148,8 @@ STAGE PLANS:
                         value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
@@ -197,33 +165,16 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index 59aea35..b7b2ba5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -47,22 +47,18 @@ val_10	10	1
 val_100	200	2
 val_103	206	2
 val_104	208	2
-PREHOOK: query: explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+PREHOOK: query: explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value asc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+POSTHOOK: query: explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value asc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -81,27 +77,12 @@ STAGE PLANS:
                 TableScan
                   alias: char_2
                   Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: value (type: char(20)), UDFToInteger(key) (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
-                        selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) -> 2:Long
                     Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 2) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 1
-                          native: false
-                          projectedOutputColumns: [0, 1]
                       keys: _col0 (type: char(20))
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -110,43 +91,16 @@ STAGE PLANS:
                         key expressions: _col0 (type: char(20))
                         sort order: +
                         Map-reduce partition columns: _col0 (type: char(20))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
                         Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: bigint), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0), count(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 1) -> bigint, VectorUDAFCountMerge(col 2) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 keys: KEY._col0 (type: char(20))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -154,43 +108,21 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: char(20))
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
                 Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 5
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -247,22 +179,18 @@ val_97	194	2
 val_96	96	1
 val_95	190	2
 val_92	92	1
-PREHOOK: query: explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+PREHOOK: query: explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value desc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression select value, sum(cast(key as int)), count(*) numrows
+POSTHOOK: query: explain select value, sum(cast(key as int)), count(*) numrows
 from char_2
 group by value
 order by value desc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -281,27 +209,12 @@ STAGE PLANS:
                 TableScan
                   alias: char_2
                   Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: value (type: char(20)), UDFToInteger(key) (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2]
-                        selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) -> 2:Long
                     Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 2) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 1
-                          native: false
-                          projectedOutputColumns: [0, 1]
                       keys: _col0 (type: char(20))
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -310,43 +223,16 @@ STAGE PLANS:
                         key expressions: _col0 (type: char(20))
                         sort order: -
                         Map-reduce partition columns: _col0 (type: char(20))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
                         Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: bigint), _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0), count(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 1) -> bigint, VectorUDAFCountMerge(col 2) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 keys: KEY._col0 (type: char(20))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -354,43 +240,21 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: char(20))
                   sort order: -
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2]
                 Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 5
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

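Note on the char_2 hunks above: the patch being reverted swapped EXPLAIN VECTORIZATION EXPRESSION back to plain EXPLAIN and dropped every "... Vectorization:" detail block, leaving only the base operator tree. On a build that still carries HIVE-11394, the removed detail can be reproduced with a query of this shape (a minimal sketch; the SET line restates the "hive.vectorized.execution.enabled IS true" condition printed in the removed PLAN VECTORIZATION header and is not part of the .q.out itself):

  SET hive.vectorized.execution.enabled=true;
  EXPLAIN VECTORIZATION EXPRESSION
  SELECT value, SUM(CAST(key AS INT)), COUNT(*) AS numrows
  FROM char_2
  GROUP BY value
  ORDER BY value DESC
  LIMIT 5;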
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
index d164ebe..6d55ab0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
@@ -121,16 +121,12 @@ POSTHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_lazy_binary_columnar
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -147,23 +143,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: CAST( t AS CHAR(10) (type: char(10)), CAST( si AS CHAR(10) (type: char(10)), CAST( i AS CHAR(20) (type: char(20)), CAST( b AS CHAR(30) (type: char(30)), CAST( f AS CHAR(20) (type: char(20)), CAST( d AS CHAR(20) (type: char(20)), CAST( s AS CHAR(50) (type: char(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19]
-                        selectExpressions: CastLongToChar(col 0, maxLength 10) -> 13:Char, CastLongToChar(col 1, maxLength 10) -> 14:Char, CastLongToChar(col 2, maxLength 20) -> 15:Char, CastLongToChar(col 3, maxLength 30) -> 16:Char, VectorUDFAdaptor(CAST( f AS CHAR(20)) -> 17:char(20), VectorUDFAdaptor(CAST( d AS CHAR(20)) -> 18:char(20), CastStringGroupToChar(col 8, maxLength 50) -> 19:Char
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -172,14 +157,6 @@ STAGE PLANS:
                           name: default.char_lazy_binary_columnar
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-2
     Dependency Collection

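The vector_char_4 hunk keeps the Select Operator's expressions line, which shows Hive wrapping each source column in an implicit cast to the target CHAR length of char_lazy_binary_columnar. A sketch of the same insert with the casts written out, lengths taken from that expressions line rather than from the table DDL (which sits outside this diff):

  INSERT OVERWRITE TABLE char_lazy_binary_columnar
  SELECT CAST(t AS CHAR(10)), CAST(si AS CHAR(10)), CAST(i AS CHAR(20)),
         CAST(b AS CHAR(30)), CAST(f AS CHAR(20)), CAST(d AS CHAR(20)),
         CAST(s AS CHAR(50))
  FROM vectortab2korc;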
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
index 57ae96b..1af8b3d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
@@ -125,15 +125,11 @@ POSTHOOK: Output: default@char_join1_str_orc
 POSTHOOK: Lineage: char_join1_str_orc.c1 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
 POSTHOOK: Lineage: char_join1_str_orc.c2 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join char with same length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join char with same length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -152,23 +148,12 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: char(10))
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -176,10 +161,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: char(10))
                           1 _col1 (type: char(10))
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           1 Map 3
@@ -187,89 +168,39 @@ STAGE PLANS:
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(10))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: char(10))
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: char(10))
                         sort order: +
                         Map-reduce partition columns: _col1 (type: char(10))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(10)), VALUE._col1 (type: int), VALUE._col2 (type: char(10))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -300,15 +231,11 @@ POSTHOOK: Input: default@char_join1_vc1_orc
 2	abc       	2	abc       
 3	 abc      	3	 abc      
 PREHOOK: query: -- Join char with different length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join char with different length char
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -327,66 +254,32 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: char(10))
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: char(20))
                         sort order: +
                         Map-reduce partition columns: _col1 (type: char(20))
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 3 Data size: 324 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 324 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: char(20))
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 324 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -394,10 +287,6 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: char(20))
                           1 _col1 (type: char(20))
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           0 Map 1
@@ -405,46 +294,19 @@ STAGE PLANS:
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(10)), VALUE._col1 (type: int), VALUE._col2 (type: char(20))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -477,15 +339,11 @@ POSTHOOK: Input: default@char_join1_vc2_orc
 2	abc       	2	abc                 
 3	 abc      	3	 abc                
 PREHOOK: query: -- Join char with string
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Join char with string
-explain vectorization expression select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1
+explain select * from char_join1_vc1_orc a join char_join1_str_orc b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -504,23 +362,12 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: char(10))
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -528,11 +375,6 @@ STAGE PLANS:
                         keys:
                           0 UDFToString(_col1) (type: string)
                           1 _col1 (type: string)
-                        Map Join Vectorization:
-                            bigTableKeyExpressions: CastStringGroupToString(col 1) -> 2:String
-                            className: VectorMapJoinInnerStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1, _col2, _col3
                         input vertices:
                           1 Map 3
@@ -540,89 +382,39 @@ STAGE PLANS:
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkOperator
-                              native: false
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              nativeConditionsNotMet: Uniform Hash IS false
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 3 Data size: 273 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
                     predicate: c2 is not null (type: boolean)
                     Statistics: Num rows: 3 Data size: 273 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c1 (type: int), c2 (type: string)
                       outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 1]
                       Statistics: Num rows: 3 Data size: 273 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col1 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 273 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(10)), VALUE._col1 (type: int), VALUE._col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

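The vector_char_mapjoin1 hunks cover three join shapes: char(10)=char(10), char(10)=char(20), and char(10)=string. In the first two the join key stays a char type end to end; in the third, the retained plan lines show the char side being converted with UDFToString(_col1) so both sides hash as string. An explicit equivalent of that third query (the CAST is an illustrative rewrite of the implicit conversion, not what the test runs):

  SELECT *
  FROM char_join1_vc1_orc a
  JOIN char_join1_str_orc b ON (CAST(a.c2 AS STRING) = b.c2)
  ORDER BY a.c1;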
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
index 73b7759..3dea73d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
@@ -45,20 +45,16 @@ POSTHOOK: Input: default@src
 0	val_0
 10	val_10
 100	val_100
-PREHOOK: query: explain vectorization only select key, value
+PREHOOK: query: explain select key, value
 from char_2
 order by key asc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only select key, value
+POSTHOOK: query: explain select key, value
 from char_2
 order by key asc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -66,32 +62,51 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: char_2
+                  Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: char(10)), value (type: char(20))
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: char(10))
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
+                      value expressions: _col1 (type: char(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: char(10)), VALUE._col0 (type: char(20))
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: -- should match the query from src
 select key, value
@@ -133,20 +148,16 @@ POSTHOOK: Input: default@src
 97	val_97
 97	val_97
 96	val_96
-PREHOOK: query: explain vectorization only select key, value
+PREHOOK: query: explain select key, value
 from char_2
 order by key desc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only select key, value
+POSTHOOK: query: explain select key, value
 from char_2
 order by key desc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -154,32 +165,51 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: char_2
+                  Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: char(10)), value (type: char(20))
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: char(10))
+                      sort order: -
+                      Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
+                      value expressions: _col1 (type: char(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: char(10)), VALUE._col0 (type: char(20))
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 5
+                  Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 990 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: -- should match the query from src
 select key, value
@@ -224,16 +254,12 @@ create table char_3 (
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_3
-PREHOOK: query: explain vectorization only operator
+PREHOOK: query: explain
 insert into table char_3 select cint from alltypesorc limit 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization only operator
+POSTHOOK: query: explain
 insert into table char_3 select cint from alltypesorc limit 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -243,63 +269,68 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
+#### A masked pattern was here ####
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
-                  TableScan Vectorization:
-                      native: true
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
+                    Limit
+                      Number of rows: 10
+                      Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        selectExpressions: CastLongToChar(col 0, maxLength 12) -> 1:Char
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
+              Select Operator
+                expressions: VALUE._col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: CAST( _col0 AS CHAR(12) (type: char(12))
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 960 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 10 Data size: 960 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.char_3
 
   Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.char_3
 
   Stage: Stage-3
+    Stats-Aggr Operator
 
 PREHOOK: query: insert into table char_3 select cint from alltypesorc limit 10
 PREHOOK: type: QUERY

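The vector_char_simple hunks end with the int-to-char insert test: cint flows through the plan unchanged until Reducer 2, where the retained Select Operator shows CAST( _col0 AS CHAR(12) being applied before the ORC file sink. Written out explicitly (the CHAR(12) column type of char_3 is inferred from that cast target, since the create table statement is truncated in the hunk header above):

  INSERT INTO TABLE char_3
  SELECT CAST(cint AS CHAR(12))
  FROM alltypesorc
  LIMIT 10;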

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
index 5d28d22..882e83d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
@@ -35,7 +35,7 @@ PREHOOK: query: -- EXPLAIN
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
 -- FROM decimal_tbl_1_orc ORDER BY dec;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3),
   round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4),
@@ -51,17 +51,13 @@ POSTHOOK: query: -- EXPLAIN
 -- round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
 -- FROM decimal_tbl_1_orc ORDER BY dec;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT
   round(dec) as d, round(dec, 0), round(dec, 1), round(dec, 2), round(dec, 3),
   round(dec, -1), round(dec, -2), round(dec, -3), round(dec, -4),
   round(dec, -5), round(dec, -6), round(dec, -7), round(dec, -8)
 FROM decimal_tbl_1_orc ORDER BY d
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -79,61 +75,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_1_orc
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                     expressions: round(dec) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                        selectExpressions: FuncRoundDecimalToDecimal(col 0) -> 1:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 0) -> 2:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 1) -> 3:decimal(22,1), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 2) -> 4:decimal(23,2), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 3) -> 5:decimal(24,3), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 6:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -2) -> 7:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -3) -> 8:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -4) -> 9:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -5) -> 10:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -6) -> 11:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -7) -> 12:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -8) -> 13:decimal(21,0)
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(21,0))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0))
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -192,7 +153,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_2_orc
 #### A masked pattern was here ####
 125.315000000000000000	-125.315000000000000000
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
   round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4),
@@ -202,7 +163,7 @@ SELECT
   round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4)
 FROM decimal_tbl_2_orc ORDER BY p
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT
   round(pos) as p, round(pos, 0),
   round(pos, 1), round(pos, 2), round(pos, 3), round(pos, 4),
@@ -212,10 +173,6 @@ SELECT
   round(neg, -1), round(neg, -2), round(neg, -3), round(neg, -4)
 FROM decimal_tbl_2_orc ORDER BY p
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -233,61 +190,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_2_orc
                   Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: round(pos) (type: decimal(21,0)), round(pos, 0) (type: decimal(21,0)), round(pos, 1) (type: decimal(22,1)), round(pos, 2) (type: decimal(23,2)), round(pos, 3) (type: decimal(24,3)), round(pos, 4) (type: decimal(25,4)), round(pos, -1) (type: decimal(21,0)), round(pos, -2) (type: decimal(21,0)), round(pos, -3) (type: decimal(21,0)), round(pos, -4) (type: decimal(21,0)), round(neg) (type: decimal(21,0)), round(neg, 0) (type: decimal(21,0)), round(neg, 1) (type: decimal(22,1)), round(neg, 2) (type: decimal(23,2)), round(neg, 3) (type: decimal(24,3)), round(neg, 4) (type: decimal(25,4)), round(neg, -1) (type: decimal(21,0)), round(neg, -2) (type: decimal(21,0)), round(neg, -3) (type: decimal(21,0)), round(neg, -4) (type: decimal(21,0))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                        selectExpressions: FuncRoundDecimalToDecimal(col 0) -> 2:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 0) -> 3:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 1) -> 4:decimal(22,1), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 2) -> 5:decimal(23,2), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 3) -> 6:decimal(24,3), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 4) -> 7:decimal(25,4), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 8:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -2) -> 9:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -3) -> 10:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -4) -> 11:decimal(21,0), FuncRoundDecimalToDecimal(col 1) -> 12:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 0) -> 13:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 1) -> 14:decimal(22,1), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 2) -> 15:decimal(23,2), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 3) -> 16:decimal(24,3), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 4) -> 17:decimal(25,4), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces -1) -> 18:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces -2) -> 19:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces -3) -> 20:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces -4) -> 21:decimal(21,0)
                     Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(21,0))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(25,4)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(22,1)), _col13 (type: decimal(23,2)), _col14 (type: decimal(24,3)), _col15 (type: decimal(25,4)), _col16 (type: decimal(21,0)), _col17 (type: decimal(21,0)), _col18 (type: decimal(21,0)), _col19 (type: decimal(21,0))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(22,1)), VALUE._col2 (type: decimal(23,2)), VALUE._col3 (type: decimal(24,3)), VALUE._col4 (type: decimal(25,4)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(22,1)), VALUE._col12 (type: decimal(23,2)), VALUE._col13 (type: decimal(24,3)), VALUE._col14 (type: decimal(25,4)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(21,0)), VALUE._col17 (type: decimal(21,0)), VALUE._col18 (type: decimal(21,0))
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -351,7 +273,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_3_orc
 #### A masked pattern was here ####
 3.141592653589793000
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
   round(dec, -13), round(dec, -14),
@@ -372,7 +294,7 @@ SELECT
   round(dec, 15), round(dec, 16)
 FROM decimal_tbl_3_orc ORDER BY d
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT
   round(dec, -15) as d, round(dec, -16),
   round(dec, -13), round(dec, -14),
@@ -393,10 +315,6 @@ SELECT
   round(dec, 15), round(dec, 16)
 FROM decimal_tbl_3_orc ORDER BY d
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -414,61 +332,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_3_orc
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                    expressions: round(dec, -15) (type: decimal(21,0)), round(dec, -16) (type: decimal(21,0)), round(dec, -5) (type: decimal(21,0)), round(dec, -6) (type: decimal(21,0)), round(dec, -3) (type: decimal(21,0)), round(dec, -4) (type: decimal(21,0)), round(dec, -1) (type: decimal(21,0)), round(dec, -2) (type: decimal(21,0)), round(dec, 0) (type: decimal(21,0)), round(dec, 1) (type: decimal(22,1)), round(dec, 2) (type: decimal(23,2)), round(dec, 3) (type: decimal(24,3)), round(dec, -13) (type: decimal(21,0)), round(dec, 4) (type: decimal(25,4)), round(dec, 5) (type: decimal(26,5)), round(dec, 6) (type: decimal(27,6)), round(dec, 7) (type: decimal(28,7)), round(dec, 8) (type: decimal(29,8)), round(dec, 9) (type: decimal(30,9)), round(dec, 10) (type: decimal(31,10)), round(dec, 11) (type: decimal(32,11)), round(dec, 12) (type: decimal(33,12)), round(dec, 13) (type: decimal(34,13)), round(dec, -14) (type: decimal(21,0)), round(dec, 14) (type: decimal(35,14)), round(dec, 15) (type: decimal(36,15)), round(dec, 16) (type: decimal(37,16)), round(dec, -11) (type: decimal(21,0)), round(dec, -12) (type: decimal(21,0)), round(dec, -9) (type: decimal(21,0)), round(dec, -10) (type: decimal(21,0)), round(dec, -7) (type: decimal(21,0)), round(dec, -8) (type: decimal(21,0))
                     outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col2, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col3, _col31, _col32, _col33, _col4, _col5, _col6, _col7, _col8, _col9
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]
-                        selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -15) -> 1:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -16) -> 2:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -5) -> 3:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -6) -> 4:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -3) -> 5:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -4) -> 6:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 7:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -2) -> 8:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 0) -> 9:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 1) -> 10:decimal(22,1), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 2) -> 11:decimal(23,2), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 3) -> 12:decimal(24,3), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -13) -> 13:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 4) -> 14:decimal(25,4), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 5) -> 15:decimal(26,5), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 6) -> 16:decimal(27,6), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 7) -> 17:decimal(28,7), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 8) -> 18:decimal(29,8), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 9) -> 19:decimal(30,9), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 10) -> 20:decimal(31,10), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 11) -> 21:decimal(32,11), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 12) -> 22:decimal(33,12), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 13) -> 23:decimal(34,13), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -14) -> 24:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 14) -> 25:decimal(35,14), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 15) -> 26:decimal(36,15), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 16) -> 27:decimal(37,16), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -11) -> 28:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -12) -> 29:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -9) -> 30:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -10) -> 31:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -7) -> 32:decimal(21,0), FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -8) -> 33:decimal(21,0)
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(21,0))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(21,0)), _col3 (type: decimal(21,0)), _col4 (type: decimal(21,0)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)), _col13 (type: decimal(21,0)), _col14 (type: decimal(21,0)), _col15 (type: decimal(21,0)), _col16 (type: decimal(21,0)), _col17 (type: decimal(22,1)), _col18 (type: decimal(23,2)), _col19 (type: decimal(24,3)), _col20 (type: decimal(25,4)), _col21 (type: decimal(26,5)), _col22 (type: decimal(27,6)), _col23 (type: decimal(28,7)), _col24 (type: decimal(29,8)), _col25 (type: decimal(30,9)), _col26 (type: decimal(31,10)), _col27 (type: decimal(32,11)), _col28 (type: decimal(33,12)), _col29 (type: decimal(34,13)), _col31 (type: decimal(35,14)), _col32 (type: decimal(36,15)), _col33 (type: decimal(37,16))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                expressions: KEY.reducesinkkey0 (type: decimal(21,0)), VALUE._col0 (type: decimal(21,0)), VALUE._col1 (type: decimal(21,0)), VALUE._col2 (type: decimal(21,0)), VALUE._col3 (type: decimal(21,0)), VALUE._col4 (type: decimal(21,0)), VALUE._col5 (type: decimal(21,0)), VALUE._col6 (type: decimal(21,0)), VALUE._col7 (type: decimal(21,0)), VALUE._col8 (type: decimal(21,0)), VALUE._col9 (type: decimal(21,0)), VALUE._col10 (type: decimal(21,0)), VALUE._col11 (type: decimal(21,0)), VALUE._col12 (type: decimal(21,0)), VALUE._col13 (type: decimal(21,0)), VALUE._col14 (type: decimal(21,0)), VALUE._col15 (type: decimal(21,0)), VALUE._col16 (type: decimal(22,1)), VALUE._col17 (type: decimal(23,2)), VALUE._col18 (type: decimal(24,3)), VALUE._col19 (type: decimal(25,4)), VALUE._col20 (type: decimal(26,5)), VALUE._col21 (type: decimal(27,6)), VALUE._col22 (type: decimal(28,7)), VALUE._col23 (type: decimal(29,8)), VALUE._col24 (type: decimal(30,9)), VALUE._col25 (type: decimal(31,10)), VALUE._col26 (type: decimal(32,11)), VALUE._col27 (type: decimal(33,12)), VALUE._col28 (type: decimal(34,13)), VALUE._col28 (type: decimal(34,13)), VALUE._col29 (type: decimal(35,14)), VALUE._col30 (type: decimal(36,15)), VALUE._col31 (type: decimal(37,16))
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, 31, 32]
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -555,18 +438,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_tbl_4_orc
 #### A masked pattern was here ####
 1809242.315111134400000000	-1809242.315111134400000000
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT round(pos, 9) as p, round(neg, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
 FROM decimal_tbl_4_orc ORDER BY p
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -584,62 +463,26 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_tbl_4_orc
                   Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9))
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3]
-                        selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces 9) -> 2:decimal(30,9), FuncRoundWithNumDigitsDecimalToDecimal(col 1, decimalPlaces 9) -> 3:decimal(30,9)
                     Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(30,9))
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(30,9))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
-                    selectExpressions: ConstantVectorExpression(val 1809242.315111134) -> 2:decimal(17,9), ConstantVectorExpression(val -1809242.315111134) -> 3:decimal(17,9)
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

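A note on the four plans above: the decimal result types follow a fixed rule. For the decimal(38,18) columns these tests appear to use (the type is inferred from the 18 fractional digits in the sample rows, not stated in the diff), round(x, n) yields decimal(21+n, n) for n >= 0 and decimal(21,0) for n < 0, which is exactly the progression decimal(21,0), decimal(22,1), ... decimal(37,16) seen in the Select Operator expressions. A minimal sketch, using a hypothetical table of the same column type:

    -- Hypothetical table; decimal(38,18) is an inference from the sample rows above.
    CREATE TABLE round_sketch (dec decimal(38,18)) STORED AS ORC;
    SET hive.vectorized.execution.enabled=true;
    EXPLAIN
    SELECT round(dec, 2),   -- decimal(23,2): scale 2, precision 21 + 2
           round(dec, 9),   -- decimal(30,9): scale 9, precision 21 + 9
           round(dec, -4)   -- decimal(21,0): negative digits round the integer part
    FROM round_sketch;
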
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
index 90f3371..5ea9f4d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
@@ -48,18 +48,14 @@ POSTHOOK: Input: default@decimal_udf2_txt
 POSTHOOK: Output: default@decimal_udf2
 POSTHOOK: Lineage: decimal_udf2.key SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ]
 POSTHOOK: Lineage: decimal_udf2.value SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:value, type:int, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key)
 FROM DECIMAL_UDF2 WHERE key = 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key)
 FROM DECIMAL_UDF2 WHERE key = 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -74,30 +70,15 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf2
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColEqualDecimalScalar(col 0, val 10) -> boolean
                     predicate: (key = 10) (type: boolean)
                     Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: NaN (type: double), NaN (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8]
-                          selectExpressions: ConstantVectorExpression(val NaN) -> 2:double, ConstantVectorExpression(val NaN) -> 3:double, ConstantVectorExpression(val 1.4711276743037347) -> 4:double, ConstantVectorExpression(val -0.8390715290764524) -> 5:double, ConstantVectorExpression(val -0.5440211108893698) -> 6:double, ConstantVectorExpression(val 0.6483608274590866) -> 7:double, ConstantVectorExpression(val 0.17453292519943295) -> 8:double
                       Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -105,14 +86,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -131,24 +104,20 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_udf2
 #### A masked pattern was here ####
 NaN	NaN	1.4711276743037347	-0.8390715290764524	-0.5440211108893698	0.6483608274590866	0.17453292519943295
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT
   exp(key), ln(key),
   log(key), log(key, key), log(key, value), log(value, key),
   log10(key), sqrt(key)
 FROM DECIMAL_UDF2 WHERE key = 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT
   exp(key), ln(key),
   log(key), log(key, key), log(key, value), log(value, key),
   log10(key), sqrt(key)
 FROM DECIMAL_UDF2 WHERE key = 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -163,30 +132,15 @@ STAGE PLANS:
                 TableScan
                   alias: decimal_udf2
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColEqualDecimalScalar(col 0, val 10) -> boolean
                     predicate: (key = 10) (type: boolean)
                     Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 9]
-                          selectExpressions: ConstantVectorExpression(val 22026.465794806718) -> 2:double, ConstantVectorExpression(val 2.302585092994046) -> 3:double, ConstantVectorExpression(val 2.302585092994046) -> 4:double, ConstantVectorExpression(val 1.0) -> 5:double, FuncLogWithBaseLongToDouble(col 1) -> 6:double, VectorUDFAdaptor(log(value, 10)) -> 7:Double, ConstantVectorExpression(val 1.0) -> 8:double, ConstantVectorExpression(val 3.1622776601683795) -> 9:double
                       Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -194,14 +148,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

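Two details in the vector_decimal_udf2 plans above are easy to miss. First, because the predicate pins key = 10, constant propagation folds the math functions at compile time, and acos(10) and asin(10) fold to NaN since both are defined only on [-1, 1]. Second, the removed vectorization detail reported usesVectorUDFAdaptor: true for the second query because log(value, 10), with a column-valued base, has no native vectorized expression and runs through VectorUDFAdaptor, whereas log(10, value) maps to the native FuncLogWithBaseLongToDouble. A short sketch against the same test table:

    -- The equality predicate lets the planner fold acos(key) into acos(10),
    -- which is NaN because acos is defined only on [-1, 1].
    SELECT acos(key), asin(key), atan(key)
    FROM DECIMAL_UDF2 WHERE key = 10;
    -- NaN	NaN	1.4711276743037347
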
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
index c63c486..188c624 100644
--- a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select distinct s, t from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select distinct s, t from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,24 +128,11 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: t (type: tinyint), s (type: string)
                     outputColumnNames: t, s
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 8
-                          native: false
-                          projectedOutputColumns: []
                       keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -158,38 +141,13 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -197,16 +155,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: string), _col0 (type: tinyint)
                   outputColumnNames: _col0, _col1
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0]
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

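The vector_distinct_2 plan above shows how SELECT DISTINCT compiles: a hash-mode Group By Operator on the map side, a mergepartial Group By in the reducer, and no aggregators at all (the removed detail listed projectedOutputColumns: [] for both). As a sketch, the key-only GROUP BY below is the equivalent formulation and should yield essentially the same plan shape:

    -- Equivalent to: select distinct s, t from vectortab2korc
    SELECT s, t
    FROM vectortab2korc
    GROUP BY s, t;
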
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_elt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_elt.q.out b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
index 44ba6de..bb66867 100644
--- a/ql/src/test/results/clientpositive/llap/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
@@ -1,80 +1,29 @@
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
+PREHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
+POSTHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
 WHERE ctinyint > 0 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 935842 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0, val 0) -> boolean
-                    predicate: (ctinyint > 0) (type: boolean)
-                    Statistics: Num rows: 4096 Data size: 312018 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [13, 6, 2, 16]
-                          selectExpressions: LongColAddLongScalar(col 12, val 1)(children: LongColModuloLongScalar(col 0, val 2)(children: col 0) -> 12:long) -> 13:long, VectorElt(columns [14, 6, 15])(children: LongColAddLongScalar(col 12, val 1)(children: LongColModuloLongScalar(col 0, val 2)(children: col 0) -> 12:long) -> 14:long, col 6, CastLongToString(col 2) -> 15:String) -> 16:string
-                      Statistics: Num rows: 4096 Data size: 1069830 Basic stats: COMPLETE Column stats: COMPLETE
-                      Limit
-                        Number of rows: 10
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                        Statistics: Num rows: 10 Data size: 2664 Basic stats: COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-                          Statistics: Num rows: 10 Data size: 2664 Basic stats: COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: (ctinyint > 0) (type: boolean)
+            Select Operator
+              expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) 
 FROM alltypesorc
@@ -98,7 +47,7 @@ POSTHOOK: Input: default@alltypesorc
 1	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
 2	cvLH6Eat2yFsyy7p	528534767	528534767
 1	cvLH6Eat2yFsyy7p	528534767	cvLH6Eat2yFsyy7p
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
        elt('1', 'abc', 'defg'),
@@ -111,7 +60,7 @@ SELECT elt(2, 'abc', 'defg'),
        elt(3, 'abc', 'defg')
 FROM alltypesorc LIMIT 1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
        elt('1', 'abc', 'defg'),
@@ -124,68 +73,22 @@ SELECT elt(2, 'abc', 'defg'),
        elt(3, 'abc', 'defg')
 FROM alltypesorc LIMIT 1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Select Operator
-                    expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                        selectExpressions: ConstantVectorExpression(val defg) -> 12:string, ConstantVectorExpression(val cc) -> 13:string, ConstantVectorExpression(val abc) -> 14:string, ConstantVectorExpression(val 2) -> 15:string, ConstantVectorExpression(val 12345) -> 16:string, ConstantVectorExpression(val 123456789012) -> 17:string, ConstantVectorExpression(val 1.25) -> 18:string, ConstantVectorExpression(val 16.0) -> 19:string, ConstantVectorExpression(val null) -> 20:string, ConstantVectorExpression(val null) -> 21:string
-                    Statistics: Num rows: 12288 Data size: 8687784 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 1
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+            Limit
+              Number of rows: 1
+              ListSink
 
 PREHOOK: query: SELECT elt(2, 'abc', 'defg'),
        elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),

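On the vector_elt output above: elt(n, v1, v2, ...) returns its n-th value argument, coercing a string index to an integer, and returns NULL when the index is out of range. That explains both the per-row branch in the first query, where (ctinyint % 2) + 1 flips between cstring1 and cint, and the trailing null constants in the second, fully folded query. A sketch of the boundary cases, matching the folded constants:

    SELECT elt(2, 'abc', 'defg'),    -- 'defg'
           elt('1', 'abc', 'defg'),  -- 'abc' (string index is coerced)
           elt(3, 'abc', 'defg')     -- NULL  (only two values supplied)
    FROM alltypesorc LIMIT 1;

Also visible in these hunks: the pre-patch golden output runs both queries as a single fetch stage (TableScan under the Fetch Operator) rather than a Tez vertex, since a simple scan with a LIMIT can be satisfied by fetch-task conversion alone.
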
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
index 258c00c..1e24e81 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
@@ -22,18 +22,14 @@ CREATE TABLE dest1(c1 STRING) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest1
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -54,55 +50,21 @@ STAGE PLANS:
                 TableScan
                   alias: srcorc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: substr(key, 1, 1) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
-                        selectExpressions: StringSubstrColStartLen(col 0, start 0, length 1) -> 2:string
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: partial1
                 outputColumnNames: _col0
@@ -111,37 +73,17 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
index 9ed40df..758f70c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
@@ -22,18 +22,14 @@ CREATE TABLE dest1(c1 STRING) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest1
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -54,55 +50,21 @@ STAGE PLANS:
                 TableScan
                   alias: srcorc
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: substr(value, 5, 1) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
-                        selectExpressions: StringSubstrColStartLen(col 1, start 4, length 1) -> 2:string
                     Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: partial1
                 outputColumnNames: _col0
@@ -111,37 +73,17 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkStringOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
index c97863a..fb5dfe6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
@@ -105,16 +105,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select s, t, max(b) from vectortab2korc group by s, t
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select s, t, max(b) from vectortab2korc group by s, t
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -132,26 +128,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: t (type: tinyint), s (type: string), b (type: bigint)
                     outputColumnNames: t, s, b
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 3]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(b)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMaxLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0, col 8
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
@@ -160,41 +142,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMaxLong(col 2) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0, col 1
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
@@ -202,16 +158,9 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1, 0, 2]
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
index 911a962..edb67f1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
@@ -45,20 +45,16 @@ POSTHOOK: Input: default@src
 0	val_0
 10	val_10
 100	val_100
-PREHOOK: query: explain vectorization select key, value
+PREHOOK: query: explain select key, value
 from varchar_2
 order by key asc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select key, value
+POSTHOOK: query: explain select key, value
 from varchar_2
 order by key asc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -88,23 +84,8 @@ STAGE PLANS:
                       value expressions: _col1 (type: varchar(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: varchar(10)), VALUE._col0 (type: varchar(20))
@@ -167,20 +148,16 @@ POSTHOOK: Input: default@src
 97	val_97
 97	val_97
 96	val_96
-PREHOOK: query: explain vectorization select key, value
+PREHOOK: query: explain select key, value
 from varchar_2
 order by key desc
 limit 5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select key, value
+POSTHOOK: query: explain select key, value
 from varchar_2
 order by key desc
 limit 5
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -210,23 +187,8 @@ STAGE PLANS:
                       value expressions: _col1 (type: varchar(20))
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: varchar(10)), VALUE._col0 (type: varchar(20))
@@ -292,16 +254,12 @@ create table varchar_3 (
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_3
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert into table varchar_3 select cint from alltypesorc limit 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert into table varchar_3 select cint from alltypesorc limit 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -321,81 +279,36 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 10
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
                       Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: int)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: CAST( _col0 AS varchar(25)) (type: varchar(25))
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1]
-                        selectExpressions: CastLongToVarChar(col 0, maxLength 25) -> 1:VarChar
                     Statistics: Num rows: 10 Data size: 1090 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 10 Data size: 1090 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
index fbec4e0..5a3cfe4 100644
--- a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
@@ -20,16 +20,12 @@ POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@count_case_groupby
 POSTHOOK: Lineage: count_case_groupby.bool EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 POSTHOOK: Lineage: count_case_groupby.key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -47,27 +43,12 @@ STAGE PLANS:
                 TableScan
                   alias: count_case_groupby
                   Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: key (type: string), CASE WHEN (bool) THEN (1) WHEN ((not bool)) THEN (0) ELSE (null) END (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 3]
-                        selectExpressions: VectorUDFAdaptor(CASE WHEN (bool) THEN (1) WHEN ((not bool)) THEN (0) ELSE (null) END)(children: NotCol(col 1) -> 2:boolean) -> 3:Long
                     Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(_col1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFCount(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 0
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -76,50 +57,21 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkStringOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
index 34e7dca..faceb5c 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
@@ -1,7 +1,7 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Use ORDER BY clauses to generate 2 stages.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ctinyint) as c1,
        MAX(ctinyint),
        COUNT(ctinyint),
@@ -12,7 +12,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
 -- Use ORDER BY clauses to generate 2 stages.
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT MIN(ctinyint) as c1,
        MAX(ctinyint),
        COUNT(ctinyint),
@@ -20,10 +20,6 @@ SELECT MIN(ctinyint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -42,101 +38,43 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: min(ctinyint), max(ctinyint), count(ctinyint), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 0) -> tinyint, VectorUDAFMaxLong(col 0) -> tinyint, VectorUDAFCount(col 0) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 0) -> tinyint, VectorUDAFMaxLong(col 1) -> tinyint, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: tinyint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: tinyint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -168,20 +106,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -64	62	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(ctinyint) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -200,100 +134,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: sum(ctinyint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 0) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -319,7 +195,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -39856
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(ctinyint) as c1,
   variance(ctinyint),
@@ -332,7 +208,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(ctinyint) as c1,
   variance(ctinyint),
@@ -345,10 +221,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -382,21 +254,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:tinyint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:tinyint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -410,13 +269,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -465,7 +317,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -4.344925324321378	1158.3003004768184	1158.3003004768184	1158.4265870337827	34.033811136527426	34.033811136527426	34.033811136527426	34.03566639620536
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(cbigint) as c1,
        MAX(cbigint),
        COUNT(cbigint),
@@ -473,7 +325,7 @@ SELECT MIN(cbigint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(cbigint) as c1,
        MAX(cbigint),
        COUNT(cbigint),
@@ -481,10 +333,6 @@ SELECT MIN(cbigint) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -503,101 +351,43 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: cbigint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: min(cbigint), max(cbigint), count(cbigint), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinLong(col 3) -> bigint, VectorUDAFMaxLong(col 3) -> bigint, VectorUDAFCount(col 3) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinLong(col 0) -> bigint, VectorUDAFMaxLong(col 1) -> bigint, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -629,20 +419,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -2147311592	2145498388	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(cbigint) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -661,100 +447,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: cbigint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: sum(cbigint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: bigint)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -780,7 +508,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -1698460028409
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(cbigint) as c1,
   variance(cbigint),
@@ -793,7 +521,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(cbigint) as c1,
   variance(cbigint),
@@ -806,10 +534,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -843,21 +567,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:bigint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:bigint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -871,13 +582,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -926,7 +630,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -1.8515862077935246E8	2.07689300543081907E18	2.07689300543081907E18	2.07711944383088768E18	1.441142951074188E9	1.441142951074188E9	1.441142951074188E9	1.4412215110214279E9
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(cfloat) as c1,
        MAX(cfloat),
        COUNT(cfloat),
@@ -934,7 +638,7 @@ SELECT MIN(cfloat) as c1,
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(cfloat) as c1,
        MAX(cfloat),
        COUNT(cfloat),
@@ -942,10 +646,6 @@ SELECT MIN(cfloat) as c1,
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -964,101 +664,43 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cfloat (type: float)
                     outputColumnNames: cfloat
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: min(cfloat), max(cfloat), count(cfloat), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinDouble(col 4) -> float, VectorUDAFMaxDouble(col 4) -> float, VectorUDAFCount(col 4) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinDouble(col 0) -> float, VectorUDAFMaxDouble(col 1) -> float, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: float)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: float), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1090,20 +732,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -64.0	79.553	9173	12288
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT SUM(cfloat) as c1
 FROM   alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1122,100 +760,42 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cfloat (type: float)
                     outputColumnNames: cfloat
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4]
                     Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: sum(cfloat)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumDouble(col 4) -> double
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumDouble(col 0) -> double
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1241,7 +821,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -39479.635992884636
-PREHOOK: query: EXPLAIN VECTORIZATION 
+PREHOOK: query: EXPLAIN 
 SELECT
   avg(cfloat) as c1,
   variance(cfloat),
@@ -1254,7 +834,7 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION 
+POSTHOOK: query: EXPLAIN 
 SELECT
   avg(cfloat) as c1,
   variance(cfloat),
@@ -1267,10 +847,6 @@ SELECT
 FROM alltypesorc
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1304,21 +880,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:float>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:float> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
@@ -1332,13 +895,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double)
@@ -1388,7 +944,7 @@ POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 -4.303895780321011	1163.8972588604984	1163.8972588604984	1164.0241556397025	34.115938487171924	34.115938487171924	34.115938487171924	34.11779822379666
 WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),
@@ -1415,7 +971,7 @@ WHERE  (((cstring2 LIKE '%b%')
             AND ((cboolean2 = 1)
                  AND (3569 = ctinyint))))
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),
@@ -1442,10 +998,6 @@ WHERE  (((cstring2 LIKE '%b%')
             AND ((cboolean2 = 1)
                  AND (3569 = ctinyint))))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1463,33 +1015,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 1210980 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7, pattern %b%) -> boolean, FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 12)(children: CastLongToDecimal(col 2) -> 12:decimal(13,3)) -> boolean, FilterDoubleColLessDoubleColumn(col 13, col 5)(children: CastLongToDouble(col 3) -> 13:double) -> boolean, FilterExprAndExpr(children: FilterLongColGreaterEqualLongColumn(col 0, col 1)(children: col 0) -> boolean, FilterLongColEqualLongScalar(col 11, val 1) -> boolean, FilterLongScalarEqualLongColumn(val 3569, col 0)(children: col 0) -> boolean) -> boolean) -> boolean
                     predicate: ((cstring2 like '%b%') or (79.553 <> CAST( cint AS decimal(13,3))) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569 = UDFToInteger(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 1210980 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)
                       outputColumnNames: cbigint, cfloat, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3, 4, 0]
                       Statistics: Num rows: 12288 Data size: 1210980 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFAvgLong(col 3) -> struct<count:bigint,sum:double>, VectorUDAFStdPopLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarSampLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint, VectorUDAFSumDouble(col 4) -> double, VectorUDAFMinLong(col 0) -> tinyint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(col 3) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarSampLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1499,21 +1033,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: struct<count:bigint,sum:double,input:bigint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: bigint), _col4 (type: double), _col5 (type: tinyint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:bigint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), stddev_pop(VALUE._col1), var_samp(VALUE._col2), count(VALUE._col3), sum(VALUE._col4), min(VALUE._col5)

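For context on the syntax these expected outputs revert away from: the same queries can be run over JDBC, and plain EXPLAIN (the form the files go back to) returns the operator plan one line per result row. Below is a minimal sketch, assuming a local HiveServer2 at the default port and hive-jdbc on the classpath; the URL, query, and session setting are illustrative placeholders, not part of the patch.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainPlanDemo {
  public static void main(String[] args) throws Exception {
    // Older hive-jdbc versions may need explicit driver registration.
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    // Placeholder connection URL; adjust host/port/database for a real cluster.
    String url = "jdbc:hive2://localhost:10000/default";
    try (Connection conn = DriverManager.getConnection(url);
         Statement stmt = conn.createStatement()) {
      // Session-level switch that the PLAN VECTORIZATION header above reports on.
      stmt.execute("set hive.vectorized.execution.enabled=true");
      // EXPLAIN returns a single string column; each row is one line of the plan,
      // which is what the expected-output files in this diff capture.
      try (ResultSet rs = stmt.executeQuery(
          "EXPLAIN SELECT MIN(ctinyint), MAX(ctinyint), COUNT(ctinyint), COUNT(*) "
              + "FROM alltypesorc")) {
        while (rs.next()) {
          System.out.println(rs.getString(1));
        }
      }
    }
  }
}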

[61/62] hive git commit: HIVE-14973. Fix Flaky test: TestJdbcWithSQLAuthorization.testBlackListedUdfUsage. (Zoltan Haindrich, reviewed by Siddharth Seth)

Posted by we...@apache.org.
HIVE-14973. Fix Flaky test: TestJdbcWithSQLAuthorization.testBlackListedUdfUsage. (Zoltan Haindrich, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57044c47
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57044c47
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57044c47

Branch: refs/heads/hive-14535
Commit: 57044c47fb4b9cff2e3e3cea1b3fcf5d00df11a4
Parents: 8888fe4
Author: Siddharth Seth <ss...@apache.org>
Authored: Tue Oct 18 16:05:41 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Tue Oct 18 16:05:41 2016 -0700

----------------------------------------------------------------------
 .../hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/57044c47/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java
index b111f95..5070c76 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java
@@ -72,7 +72,7 @@ public class TestJdbcWithSQLAuthUDFBlacklist {
     Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "user1", "bar");
 
     Statement stmt = hs2Conn.createStatement();
-    String tableName1 = "test_jdbc_sql_auth_udf";
+    String tableName1 = "test_jdbc_sql_auth_udf_blacklist";
     stmt.execute("create table " + tableName1 + "(i int) ");
 
     verifyUDFNotAllowed(stmt, tableName1, "sqrt(1)", "sqrt");

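The one-line rename above (test_jdbc_sql_auth_udf to test_jdbc_sql_auth_udf_blacklist) presumably de-conflicts this test from the similarly named table created by TestJdbcWithSQLAuthorization in the same database. A more general pattern for avoiding such collisions, shown here only as a sketch and not part of the patch, is to derive a per-run table name:

import java.util.UUID;

public class UniqueTableNames {
  // Hypothetical helper, not part of the patch: append a random suffix so that
  // tests sharing a database can never collide on table names.
  static String uniqueTableName(String base) {
    // Hive table identifiers accept letters, digits, and underscores,
    // so replace the dashes that UUID.toString() produces.
    return base + "_" + UUID.randomUUID().toString().replace('-', '_');
  }

  public static void main(String[] args) {
    // Prints a name such as test_jdbc_sql_auth_udf_<random suffix>,
    // unique for each invocation.
    System.out.println(uniqueTableName("test_jdbc_sql_auth_udf"));
  }
}

A fixed rename, as in the patch, keeps the name predictable in logs and error messages; the random-suffix variant trades that predictability for stronger isolation between runs.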

[36/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
index 964ce95..85116e7 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
@@ -282,73 +282,25 @@ POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	_c54
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_boolean_to_bigint
-                  Statistics: Num rows: 10 Data size: 4759 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 (type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c47 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 55, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 55
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    dataColumns: insert_num:int, c1:boolean, c2:boolean, c3:boolean, c4:boolean, c5:boolean, c6:boolean, c7:boolean, c8:boolean, c9:boolean, c10:tinyint, c11:tinyint, c12:tinyint, c13:tinyint, c14:tinyint, c15:tinyint, c16:tinyint, c17:tinyint, c18:tinyint, c19:tinyint, c20:tinyint, c21:smallint, c22:smallint, c23:smallint, c24:smallint, c25:smallint, c26:smallint, c27:smallint, c28:smallint, c29:smallint, c30:smallint, c31:smallint, c32:int, c33:int, c34:int, c35:int, c36:int, c37:int, c38:int, c39:int, c40:int, c41:int, c42:int, c43:bigint, c44:bigint, c45:bigint, c46:bigint, c47:bigint, c48:bigint, c49:bigint, c50:bigint, c51:bigint, c52:bigint, c53:bigint, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=10 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
+          TableScan [TS_0] (rows=10 width=475)
+            default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -545,73 +497,25 @@ POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	_c34
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_decimal_to_double
-                  Statistics: Num rows: 6 Data size: 2563 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 (type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 (type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 (type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 (type: double), c33 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 35, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 35
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    dataColumns: insert_num:int, c1:decimal(38,18), c2:decimal(38,18), c3:decimal(38,18), c4:decimal(38,18), c5:decimal(38,18), c6:decimal(38,18), c7:decimal(38,18), c8:decimal(38,18), c9:decimal(38,18), c10:decimal(38,18), c11:decimal(38,18), c12:float, c13:float, c14:float, c15:float, c16:float, c17:float, c18:float, c19:float, c20:float, c21:float, c22:float, c23:double, c24:double, c25:double, c26:double, c27:double, c28:double, c29:double, c30:double, c31:double, c32:double, c33:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
+          TableScan [TS_0] (rows=6 width=427)
+            default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -724,73 +628,25 @@ POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	_c13
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_timestamp
-                  Statistics: Num rows: 6 Data size: 874 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: timestamp), c12 (type: timestamp), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 14, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 14
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    dataColumns: insert_num:int, c1:timestamp, c2:timestamp, c3:timestamp, c4:timestamp, c5:timestamp, c6:timestamp, c7:timestamp, c8:timestamp, c9:timestamp, c10:timestamp, c11:timestamp, c12:timestamp, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
+          TableScan [TS_0] (rows=6 width=145)
+            default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -887,73 +743,25 @@ POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	date1	date1	date1	date1	_c5
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_date
-                  Statistics: Num rows: 6 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 6, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 6
-                    includeColumns: [0, 1, 2, 3, 4, 5]
-                    dataColumns: insert_num:int, c1:date, c2:date, c3:date, c4:date, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+          TableScan [TS_0] (rows=6 width=62)
+            default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
@@ -1131,73 +939,25 @@ POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).insert_num SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 same_type1_c_txt.insert_num	same_type1_c_txt.c1	same_type1_c_txt.c2	same_type1_c_txt.c3	same_type1_c_txt.c4	same_type1_c_txt.c5	same_type1_c_txt.c6	same_type1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_same_type_different_params
-                  Statistics: Num rows: 13 Data size: 1311 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 1, 2, 3, 4, 5, 6, 7]
-                    Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 8
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                    dataColumns: insert_num:int, c1:char(8), c2:char(32), c3:varchar(15), c4:varchar(18), c5:decimal(10,2), c6:decimal(25,15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=13 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+          TableScan [TS_0] (rows=13 width=100)
+            default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
index 208ee4f..04f2891 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
@@ -87,72 +87,25 @@ POSTHOOK: Lineage: table_add_int_permute_select.b SIMPLE [(values__tmp__table__1
 POSTHOOK: Lineage: table_add_int_permute_select.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_permute_select.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_permute_select
-                  Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=20)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=20)
+            default@table_add_int_permute_select,table_add_int_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_permute_select
@@ -259,72 +212,25 @@ POSTHOOK: Lineage: table_add_int_string_permute_select.c EXPRESSION [(values__tm
 POSTHOOK: Lineage: table_add_int_string_permute_select.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_string_permute_select.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,a,b from table_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_add_int_string_permute_select
-                  Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2]
-                    Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 101 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=20)
+          Output:["_col0","_col1","_col2"]
+          TableScan [TS_0] (rows=5 width=20)
+            default@table_add_int_string_permute_select,table_add_int_string_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,a,b from table_add_int_string_permute_select
@@ -501,72 +407,25 @@ POSTHOOK: Lineage: table_change_string_group_double.c2 EXPRESSION [(values__tmp_
 POSTHOOK: Lineage: table_change_string_group_double.c3 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_string_group_double.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 264 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 0
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=52)
+          Output:["_col0","_col1","_col2","_col3","_col4"]
+          TableScan [TS_0] (rows=5 width=52)
+            default@table_change_string_group_double,table_change_string_group_double,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
@@ -836,72 +695,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_gro
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 755 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=151)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21"]
+          TableScan [TS_0] (rows=5 width=151)
+            default@table_change_numeric_group_string_group_multi_ints_string_group,table_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1056,72 +868,25 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: table_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Select Operator
-                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 1250 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.row.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 0
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=250)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
+          TableScan [TS_0] (rows=5 width=250)
+            default@table_change_numeric_group_string_group_floating_string_group,table_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
index 653274c..d4a9747 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
@@ -101,16 +101,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -128,26 +124,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: dc (type: decimal(38,18))
                     outputColumnNames: dc
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [6]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(dc), max(dc), sum(dc), avg(dc)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinDecimal(col 6) -> decimal(38,18), VectorUDAFMaxDecimal(col 6) -> decimal(38,18), VectorUDAFSumDecimal(col 6) -> decimal(38,18), VectorUDAFAvgDecimal(col 6) -> struct<count:bigint,sum:decimal(38,18)>
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgDecimal(col 6) -> struct<count:bigint,sum:decimal(38,18)> output type STRUCT requires PRIMITIVE IS false
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 624 Basic stats: COMPLETE Column stats: NONE
@@ -157,21 +139,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: struct<count:bigint,sum:decimal(38,18),input:decimal(38,18)>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:decimal(38,18),input:decimal(38,18)> of Column[VALUE._col3] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), sum(VALUE._col2), avg(VALUE._col3)

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
index 6785248..3d00ade 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
@@ -33,9 +33,9 @@ POSTHOOK: Output: default@testvec
 POSTHOOK: Lineage: testvec.dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 POSTHOOK: Lineage: testvec.greg_dt SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
 POSTHOOK: Lineage: testvec.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: explain vectorization select max(dt), max(greg_dt) from testvec where id=5
+PREHOOK: query: explain select max(dt), max(greg_dt) from testvec where id=5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select max(dt), max(greg_dt) from testvec where id=5
+POSTHOOK: query: explain select max(dt), max(greg_dt) from testvec where id=5
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
 

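Taken together, the q-file diffs above exercise the three EXPLAIN VECTORIZATION variants that this revert rewrites back to plain EXPLAIN. A minimal sketch of the syntax as these tests invoke it (table t and columns insert_num, c1, b are placeholders here, not the actual test tables):

  -- bare form: plan-level summary (the PLAN VECTORIZATION header with
  -- enabled / enabledConditionsMet)
  EXPLAIN VECTORIZATION
  SELECT MAX(c1) FROM t WHERE insert_num = 5;

  -- EXPRESSION: adds per-operator vectorization detail, e.g.
  -- VectorSelectOperator, VectorGroupByOperator and its VectorUDAF* aggregators
  EXPLAIN VECTORIZATION EXPRESSION
  SELECT MIN(c1), MAX(c1), SUM(c1), AVG(c1) FROM t;

  -- DETAIL: additionally prints the rowBatchContext (dataColumnCount,
  -- includeColumns, dataColumns, partitionColumnCount, partitionColumns)
  EXPLAIN VECTORIZATION DETAIL
  SELECT insert_num, c1, b FROM t;

With the feature reverted, each query drops back to plain EXPLAIN, which is why the expected outputs above lose the PLAN VECTORIZATION header and the per-vertex Map Vectorization / Reduce Vectorization sections.
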

[55/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/explain_logical.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out
index 4a25a38..79a3050 100644
--- a/ql/src/test/results/clientpositive/explain_logical.q.out
+++ b/ql/src/test/results/clientpositive/explain_logical.q.out
@@ -364,36 +364,37 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4
 POSTHOOK: type: QUERY
 LOGICAL PLAN:
-$hdt$_0:src 
+$hdt$_0:srcpart 
   TableScan (TS_0)
-    alias: src
+    alias: srcpart
     properties:
       insideView TRUE
-    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
     Filter Operator (FIL_15)
       predicate: key is not null (type: boolean)
-      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
       Select Operator (SEL_2)
-        expressions: key (type: string)
-        outputColumnNames: _col0
-        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        expressions: key (type: string), value (type: string)
+        outputColumnNames: _col0, _col1
+        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reduce Output Operator (RS_9)
           key expressions: _col0 (type: string)
           sort order: +
           Map-reduce partition columns: _col0 (type: string)
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          value expressions: _col1 (type: string)
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 1 to 2
+                 Inner Join 0 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col0, _col2, _col4
+            outputColumnNames: _col1, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
             Select Operator (SEL_13)
-              expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+              expressions: _col2 (type: string), _col1 (type: string), _col4 (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
               File Output Operator (FS_14)
@@ -403,34 +404,33 @@ $hdt$_0:src
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_1:srcpart 
+$hdt$_1:src 
   TableScan (TS_3)
-    alias: srcpart
+    alias: src
     properties:
       insideView TRUE
-    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Filter Operator (FIL_16)
       predicate: key is not null (type: boolean)
-      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Select Operator (SEL_5)
-        expressions: key (type: string), value (type: string)
-        outputColumnNames: _col0, _col1
-        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+        expressions: key (type: string)
+        outputColumnNames: _col0
+        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reduce Output Operator (RS_10)
           key expressions: _col0 (type: string)
           sort order: +
           Map-reduce partition columns: _col0 (type: string)
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          value expressions: _col1 (type: string)
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 1 to 2
+                 Inner Join 0 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col0, _col2, _col4
+            outputColumnNames: _col1, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
 $hdt$_2:src3 
   TableScan (TS_6)
@@ -454,12 +454,12 @@ $hdt$_2:src3
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 1 to 2
+                 Inner Join 0 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col0, _col2, _col4
+            outputColumnNames: _col1, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
 
 PREHOOK: query: -- The table should show up in the explain logical even if none

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
index e70f912..6572f6c 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
@@ -1387,11 +1387,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: key
+              outputColumnNames: _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(key)
-                keys: key (type: string)
+                aggregations: count(1)
+                keys: _col1 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -1482,7 +1482,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [t1]
+        /t1 [$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1708,7 +1708,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1757,11 +1757,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -1826,7 +1826,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -1912,7 +1912,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -4123,11 +4123,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -4218,7 +4218,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2 [$hdt$_0:t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4444,7 +4444,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -4502,11 +4502,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -4597,7 +4597,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2 [$hdt$_0:t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4823,7 +4823,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
index fc52984..ce71354 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
@@ -1453,11 +1453,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: key
+              outputColumnNames: _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(key)
-                keys: key (type: string)
+                aggregations: count(1)
+                keys: _col1 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -1548,7 +1548,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [t1]
+        /t1 [$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1774,7 +1774,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1824,11 +1824,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -1893,7 +1893,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2044,7 +2044,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -4585,11 +4585,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -4680,7 +4680,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2 [$hdt$_0:t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4906,7 +4906,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -4964,11 +4964,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
+              outputColumnNames: _col0, _col2
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(val)
-                keys: key (type: string), val (type: string)
+                aggregations: count(1)
+                keys: _col0 (type: string), _col2 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -5059,7 +5059,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2 [$hdt$_0:t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -5285,7 +5285,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
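
For context, the hunks above all trace back to statements of this shape (a sketch reconstructed from the PREHOOK/POSTHOOK lines in this excerpt; the exact DDL of T1/T2 and of the output tables is an assumption, it is not part of the diff):

  -- assumed: T1(key STRING, val STRING), outputTbl4(key1 INT, key2 INT, key3 STRING, cnt INT)
  INSERT OVERWRITE TABLE outputTbl4
  SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;

On the '+' side of each hunk the Select Operator projects the internal names _col0/_col2 and the aggregation is planned as count(1) instead of count(val); since the count no longer references a source column, the cnt lineage collapses to "(t1)t1.null".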

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_pushdown.q.out b/ql/src/test/results/clientpositive/limit_pushdown.q.out
index 6aaf9b8..898662b 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown.q.out
@@ -695,7 +695,17 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 0
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY
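
The hunk above changes the expected plan for a zero-row limit. A sketch of the statement being explained, taken from the PREHOOK line that follows it (src is the standard (key STRING, value STRING) test table):

  EXPLAIN
  SELECT key, value FROM src ORDER BY key LIMIT 0;

On the '+' side the Fetch Operator now carries a full processor tree (TableScan -> Select Operator -> Limit 0 -> ListSink) rather than a bare ListSink, with the Limit's statistics pinned at zero rows.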

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/limit_pushdown3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_pushdown3.q.out b/ql/src/test/results/clientpositive/limit_pushdown3.q.out
index 8ccda6a..66dc77f 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown3.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown3.q.out
@@ -824,7 +824,17 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 0
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out
index 4ea1488..0978ddd 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out
@@ -28,11 +28,11 @@ Stage-0
     Stage-1
       Reducer 3 llap
       File Output Operator [FS_12]
-        Select Operator [SEL_11] (rows=7286 width=620)
+        Select Operator [SEL_11] (rows=9759 width=620)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
         <-Reducer 2 [SIMPLE_EDGE] llap
           SHUFFLE [RS_10]
-            Merge Join Operator [MERGEJOIN_17] (rows=7286 width=620)
+            Merge Join Operator [MERGEJOIN_17] (rows=9759 width=620)
               Conds:RS_6._col2=RS_7._col2(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
             <-Map 1 [SIMPLE_EDGE] llap
               SHUFFLE [RS_6]
@@ -46,10 +46,10 @@ Stage-0
             <-Map 4 [SIMPLE_EDGE] llap
               SHUFFLE [RS_7]
                 PartitionCols:_col2
-                Select Operator [SEL_5] (rows=3424 width=251)
+                Select Operator [SEL_5] (rows=4586 width=251)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
-                  Filter Operator [FIL_16] (rows=3424 width=251)
-                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                  Filter Operator [FIL_16] (rows=4586 width=251)
+                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                     TableScan [TS_3] (rows=12288 width=251)
                       default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
 
@@ -113,7 +113,7 @@ Stage-0
           SHUFFLE [RS_11]
             Group By Operator [GBY_10] (rows=1 width=8)
               Output:["_col0"],aggregations:["count()"]
-              Merge Join Operator [MERGEJOIN_19] (rows=7286 width=8)
+              Merge Join Operator [MERGEJOIN_19] (rows=9759 width=8)
                 Conds:RS_6._col0=RS_7._col0(Inner)
               <-Map 1 [SIMPLE_EDGE] llap
                 SHUFFLE [RS_6]
@@ -127,10 +127,10 @@ Stage-0
               <-Map 4 [SIMPLE_EDGE] llap
                 SHUFFLE [RS_7]
                   PartitionCols:_col0
-                  Select Operator [SEL_5] (rows=3424 width=8)
+                  Select Operator [SEL_5] (rows=4586 width=8)
                     Output:["_col0"]
-                    Filter Operator [FIL_18] (rows=3424 width=8)
-                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                    Filter Operator [FIL_18] (rows=4586 width=8)
+                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                       TableScan [TS_3] (rows=12288 width=8)
                         default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["cint","cbigint"]
 
@@ -193,7 +193,7 @@ Stage-0
                 PartitionCols:_col0
                 Group By Operator [GBY_10] (rows=2765 width=12)
                   Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                  Merge Join Operator [MERGEJOIN_21] (rows=7286 width=4)
+                  Merge Join Operator [MERGEJOIN_21] (rows=9759 width=4)
                     Conds:RS_6._col1=RS_7._col0(Inner),Output:["_col0"]
                   <-Map 1 [SIMPLE_EDGE] llap
                     SHUFFLE [RS_6]
@@ -207,10 +207,10 @@ Stage-0
                   <-Map 5 [SIMPLE_EDGE] llap
                     SHUFFLE [RS_7]
                       PartitionCols:_col0
-                      Select Operator [SEL_5] (rows=3424 width=8)
+                      Select Operator [SEL_5] (rows=4586 width=8)
                         Output:["_col0"]
-                        Filter Operator [FIL_20] (rows=3424 width=8)
-                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                        Filter Operator [FIL_20] (rows=4586 width=8)
+                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                           TableScan [TS_3] (rows=12288 width=8)
                             default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["cint","cbigint"]
 
@@ -281,7 +281,7 @@ Stage-0
                 Select Operator [SEL_5] (rows=6144 width=215)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                   Filter Operator [FIL_16] (rows=6144 width=215)
-                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                     TableScan [TS_3] (rows=12288 width=215)
                       default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
             <-Map 1 [CUSTOM_SIMPLE_EDGE] llap
@@ -362,7 +362,7 @@ Stage-0
                   Select Operator [SEL_5] (rows=6144 width=215)
                     Output:["_col0"]
                     Filter Operator [FIL_18] (rows=6144 width=215)
-                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                       TableScan [TS_3] (rows=12288 width=215)
                         default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
               <-Map 1 [CUSTOM_SIMPLE_EDGE] llap
@@ -442,7 +442,7 @@ Stage-0
                       Select Operator [SEL_5] (rows=6144 width=215)
                         Output:["_col0"]
                         Filter Operator [FIL_20] (rows=6144 width=215)
-                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                           TableScan [TS_3] (rows=12288 width=215)
                             default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
                   <-Map 1 [CUSTOM_SIMPLE_EDGE] llap

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
index 3fe4837..507791e 100644
--- a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
@@ -778,7 +778,14 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 0
+              ListSink
 
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/lineage3.q.out b/ql/src/test/results/clientpositive/llap/lineage3.q.out
index 257c547..1a532da 100644
--- a/ql/src/test/results/clientpositive/llap/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/llap/lineage3.q.out
@@ -116,7 +116,7 @@ order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"tez","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"(alltypesorc.ctinyint is not null and alltypesorc.cbigint is not null and (alltypesorc.ctinyint < 100))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = b.ctinyint)","e
 dgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((b.ctinyint < 100) and b.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"tez","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = b.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6
 ],"targets":[0,1,2,3],"expression":"((b.ctinyint < 100) and b.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1067683781	-51
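
The lineage JSON above is easier to follow with its queryText field unescaped; the statement, copied verbatim from the hunk, is:

  select a.cbigint, a.ctinyint, b.cint, b.ctinyint
  from
    (select ctinyint, cbigint from alltypesorc
     union all
     select ctinyint, cbigint from alltypesorc) a
    inner join
    alltypesorc b
    on (a.ctinyint = b.ctinyint)
  where b.ctinyint < 100 and a.cbigint is not null and b.cint is not null
  order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5

The only change is in the first PREDICATE edge: "(alltypesorc.ctinyint is not null and alltypesorc.cbigint is not null and (alltypesorc.ctinyint < 100))" becomes "((alltypesorc.ctinyint < 100) and alltypesorc.cbigint is not null)", dropping the ctinyint IS NOT NULL term that is already implied by ctinyint < 100.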

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/table_access_keys_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/llap/table_access_keys_stats.q.out
index 91bdff3..e68f60f 100644
--- a/ql/src/test/results/clientpositive/llap/table_access_keys_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/table_access_keys_stats.q.out
@@ -77,7 +77,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key
 
@@ -90,7 +90,7 @@ PREHOOK: query: SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
 
@@ -104,7 +104,7 @@ PREHOOK: query: SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
index 3c6ef9a..5c8db64 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
@@ -55,17 +55,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                      Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col2 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col2 (type: int)
-                        Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
             Execution mode: llap
             LLAP IO: all inputs
@@ -79,11 +79,11 @@ STAGE PLANS:
                   0 _col2 (type: int)
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
-                Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col2 (type: int)
                   sort order: +
-                  Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
             Execution mode: llap
@@ -91,10 +91,10 @@ STAGE PLANS:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
-                Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -186,17 +186,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -208,7 +208,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 7286 Data size: 58288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 78072 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -315,17 +315,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -338,7 +338,7 @@ STAGE PLANS:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 7286 Data size: 29144 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 39036 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   keys: _col0 (type: smallint)
@@ -468,7 +468,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -602,7 +602,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -734,7 +734,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
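
Every hunk in this file makes the same predicate change: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) becomes (cint BETWEEN 1000000 AND 3000000 and cbigint is not null), since cint IS NOT NULL is implied by the BETWEEN. Dropping the redundant conjunct also changes the estimated selectivity under "Column stats: COMPLETE" (3424 -> 4586 rows after the filter, 7286 -> 9759 after the join). A sketch of the kind of self-join these plans come from (an assumption based on the operator tree; the .q source is not part of this excerpt):

  SELECT a.*, b.*
  FROM alltypesorc a
  JOIN alltypesorc b ON a.cint = b.cint
  WHERE b.cint BETWEEN 1000000 AND 3000000
    AND b.cbigint IS NOT NULL
  ORDER BY a.cint;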

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
index c3aebc7..b2359c1 100644
--- a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
@@ -55,17 +55,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                      Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col2 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col2 (type: int)
-                        Statistics: Num rows: 3424 Data size: 862030 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 1154510 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
             Execution mode: llap
             LLAP IO: all inputs
@@ -79,11 +79,11 @@ STAGE PLANS:
                   0 _col2 (type: int)
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
-                Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col2 (type: int)
                   sort order: +
-                  Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
             Execution mode: llap
@@ -91,10 +91,10 @@ STAGE PLANS:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
-                Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 7286 Data size: 4517320 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 9759 Data size: 6050580 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -186,17 +186,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -208,7 +208,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 7286 Data size: 58288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 78072 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -315,17 +315,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
-                    Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 3424 Data size: 30684 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 4586 Data size: 41088 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -338,7 +338,7 @@ STAGE PLANS:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 7286 Data size: 29144 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 9759 Data size: 39036 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   keys: _col0 (type: smallint)
@@ -468,7 +468,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -602,7 +602,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -734,7 +734,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
+                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out b/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
index 14cde78..71766a7 100644
--- a/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
+++ b/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
@@ -695,7 +695,17 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 0
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key,value from src order by key limit 0,0
 PREHOOK: type: QUERY
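
offset_limit_ppd_optimizer.q.out picks up the same zero-row plan change as limit_pushdown.q.out, here for the offset form of LIMIT (from the PREHOOK line above; Hive reads LIMIT 0,0 as offset 0, row count 0):

  EXPLAIN
  SELECT key, value FROM src ORDER BY key LIMIT 0,0;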

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/perf/query75.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query75.q.out b/ql/src/test/results/clientpositive/perf/query75.q.out
index 0c72248..f78ce8a 100644
--- a/ql/src/test/results/clientpositive/perf/query75.q.out
+++ b/ql/src/test/results/clientpositive/perf/query75.q.out
@@ -81,7 +81,7 @@ Stage-0
                                         Select Operator [SEL_82] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_231] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_80] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 27 [SIMPLE_EDGE]
@@ -136,7 +136,7 @@ Stage-0
                                         Select Operator [SEL_104] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_235] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_102] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 36 [SIMPLE_EDGE]
@@ -191,7 +191,7 @@ Stage-0
                                         Select Operator [SEL_128] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_239] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_126] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 43 [SIMPLE_EDGE]
@@ -252,7 +252,7 @@ Stage-0
                                         Select Operator [SEL_30] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_223] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_28] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 13 [SIMPLE_EDGE]
@@ -307,7 +307,7 @@ Stage-0
                                         Select Operator [SEL_54] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_227] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_52] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 20 [SIMPLE_EDGE]
@@ -362,7 +362,7 @@ Stage-0
                                         Select Operator [SEL_8] (rows=231000 width=1436)
                                           Output:["_col0","_col1","_col2","_col3","_col5"]
                                           Filter Operator [FIL_219] (rows=231000 width=1436)
-                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                             TableScan [TS_6] (rows=462000 width=1436)
                                               default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                                     <-Reducer 2 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
index c6a7982..92ca67b 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
@@ -836,11 +836,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: key
+                    outputColumnNames: _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(key)
-                      keys: key (type: string)
+                      aggregations: count(1)
+                      keys: _col1 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -974,7 +974,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1028,11 +1028,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -1184,7 +1184,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -3074,11 +3074,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -3212,7 +3212,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3268,11 +3268,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -3406,7 +3406,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
index a438124..f7f4dbb 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
@@ -855,11 +855,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: key
+                    outputColumnNames: _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(key)
-                      keys: key (type: string)
+                      aggregations: count(1)
+                      keys: _col1 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -993,7 +993,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1048,11 +1048,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -1222,7 +1222,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -3207,11 +3207,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -3345,7 +3345,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3401,11 +3401,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: key, val
+                    outputColumnNames: _col0, _col2
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(val)
-                      keys: key (type: string), val (type: string)
+                      aggregations: count(1)
+                      keys: _col0 (type: string), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -3539,7 +3539,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
index 67c6e70..95de5b1 100644
--- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
@@ -746,7 +746,14 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 0
+              ListSink
 
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
index e26ccec..07727d4 100644
--- a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
+++ b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
@@ -77,7 +77,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key
 
@@ -90,7 +90,7 @@ PREHOOK: query: SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
 
@@ -104,7 +104,7 @@ PREHOOK: query: SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_2
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/tez/explainanalyze_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_4.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_4.q.out
index 3426d19..9a3d59f 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_4.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_4.q.out
@@ -46,11 +46,11 @@ Stage-0
     Stage-1
       Reducer 3
       File Output Operator [FS_12]
-        Select Operator [SEL_11] (rows=7286/10 width=620)
+        Select Operator [SEL_11] (rows=9759/10 width=620)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
         <-Reducer 2 [SIMPLE_EDGE]
           SHUFFLE [RS_10]
-            Merge Join Operator [MERGEJOIN_17] (rows=7286/10 width=620)
+            Merge Join Operator [MERGEJOIN_17] (rows=9759/10 width=620)
               Conds:RS_6._col2=RS_7._col2(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
             <-Map 1 [SIMPLE_EDGE]
               SHUFFLE [RS_6]
@@ -64,10 +64,10 @@ Stage-0
             <-Map 4 [SIMPLE_EDGE]
               SHUFFLE [RS_7]
                 PartitionCols:_col2
-                Select Operator [SEL_5] (rows=3424/10 width=251)
+                Select Operator [SEL_5] (rows=4586/10 width=251)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
-                  Filter Operator [FIL_16] (rows=3424/10 width=251)
-                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                  Filter Operator [FIL_16] (rows=4586/10 width=251)
+                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                     TableScan [TS_3] (rows=12288/12288 width=251)
                       default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
 
@@ -147,7 +147,7 @@ Stage-0
           SHUFFLE [RS_11]
             Group By Operator [GBY_10] (rows=1/1 width=8)
               Output:["_col0"],aggregations:["count()"]
-              Merge Join Operator [MERGEJOIN_19] (rows=7286/10 width=8)
+              Merge Join Operator [MERGEJOIN_19] (rows=9759/10 width=8)
                 Conds:RS_6._col0=RS_7._col0(Inner)
               <-Map 1 [SIMPLE_EDGE]
                 SHUFFLE [RS_6]
@@ -161,10 +161,10 @@ Stage-0
               <-Map 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_7]
                   PartitionCols:_col0
-                  Select Operator [SEL_5] (rows=3424/10 width=8)
+                  Select Operator [SEL_5] (rows=4586/10 width=8)
                     Output:["_col0"]
-                    Filter Operator [FIL_18] (rows=3424/10 width=8)
-                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                    Filter Operator [FIL_18] (rows=4586/10 width=8)
+                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                       TableScan [TS_3] (rows=12288/12288 width=8)
                         default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["cint","cbigint"]
 
@@ -247,7 +247,7 @@ Stage-0
                 PartitionCols:_col0
                 Group By Operator [GBY_10] (rows=2765/5 width=12)
                   Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                  Merge Join Operator [MERGEJOIN_21] (rows=7286/10 width=4)
+                  Merge Join Operator [MERGEJOIN_21] (rows=9759/10 width=4)
                     Conds:RS_6._col1=RS_7._col0(Inner),Output:["_col0"]
                   <-Map 1 [SIMPLE_EDGE]
                     SHUFFLE [RS_6]
@@ -261,10 +261,10 @@ Stage-0
                   <-Map 5 [SIMPLE_EDGE]
                     SHUFFLE [RS_7]
                       PartitionCols:_col0
-                      Select Operator [SEL_5] (rows=3424/10 width=8)
+                      Select Operator [SEL_5] (rows=4586/10 width=8)
                         Output:["_col0"]
-                        Filter Operator [FIL_20] (rows=3424/10 width=8)
-                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                        Filter Operator [FIL_20] (rows=4586/10 width=8)
+                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                           TableScan [TS_3] (rows=12288/12288 width=8)
                             default@alltypesorc,b,Tbl:COMPLETE,Col:COMPLETE,Output:["cint","cbigint"]
 
@@ -353,7 +353,7 @@ Stage-0
                 Select Operator [SEL_5] (rows=6144/10 width=215)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                   Filter Operator [FIL_16] (rows=6144/10 width=215)
-                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                     TableScan [TS_3] (rows=12288/12288 width=215)
                       default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
             <-Map 1 [CUSTOM_SIMPLE_EDGE]
@@ -450,7 +450,7 @@ Stage-0
                   Select Operator [SEL_5] (rows=6144/10 width=215)
                     Output:["_col0"]
                     Filter Operator [FIL_18] (rows=6144/10 width=215)
-                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                       TableScan [TS_3] (rows=12288/12288 width=215)
                         default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
               <-Map 1 [CUSTOM_SIMPLE_EDGE]
@@ -550,7 +550,7 @@ Stage-0
                       Select Operator [SEL_5] (rows=6144/10 width=215)
                         Output:["_col0"]
                         Filter Operator [FIL_20] (rows=6144/10 width=215)
-                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
+                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
                           TableScan [TS_3] (rows=12288/12288 width=215)
                             default@alltypesorc,b,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
                   <-Map 1 [CUSTOM_SIMPLE_EDGE]


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_interval_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_interval_1.q b/ql/src/test/queries/clientpositive/vector_interval_1.q
index f4f0024..8fefe41 100644
--- a/ql/src/test/queries/clientpositive/vector_interval_1.q
+++ b/ql/src/test/queries/clientpositive/vector_interval_1.q
@@ -1,7 +1,8 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
+
 set hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 drop table if exists vector_interval_1;
 create table vector_interval_1 (ts timestamp, dt date, str1 string, str2 string) stored as orc;
@@ -12,7 +13,7 @@ insert into vector_interval_1
   select null, null, null, null from src limit 1;
 
 -- constants/cast from string
-explain vectorization expression
+explain
 select
   str1,
   interval '1-2' year to month, interval_year_month(str1),
@@ -27,7 +28,7 @@ from vector_interval_1 order by str1;
 
 
 -- interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   interval '1-2' year to month + interval '1-2' year to month,
@@ -48,7 +49,7 @@ select
   interval '1-2' year to month - interval_year_month(str1)
 from vector_interval_1 order by dt;
 
-explain vectorization expression
+explain
 select
   dt,
   interval '1 2:3:4' day to second + interval '1 2:3:4' day to second,
@@ -71,7 +72,7 @@ from vector_interval_1 order by dt;
 
 
 -- date-interval arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt + interval '1-2' year to month,
@@ -106,7 +107,7 @@ from vector_interval_1 order by dt;
 
 
 -- timestamp-interval arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts + interval '1-2' year to month,
@@ -141,7 +142,7 @@ from vector_interval_1 order by ts;
 
 
 -- timestamp-timestamp arithmetic
-explain vectorization expression
+explain
 select
   ts,
   ts - ts,
@@ -158,7 +159,7 @@ from vector_interval_1 order by ts;
 
 
 -- date-date arithmetic
-explain vectorization expression
+explain
 select
   dt,
   dt - dt,
@@ -175,7 +176,7 @@ from vector_interval_1 order by dt;
 
 
 -- date-timestamp arithmetic
-explain vectorization expression
+explain
 select
   dt,
   ts - dt,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_interval_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_interval_2.q b/ql/src/test/queries/clientpositive/vector_interval_2.q
index 0b78a4b..5afb511 100644
--- a/ql/src/test/queries/clientpositive/vector_interval_2.q
+++ b/ql/src/test/queries/clientpositive/vector_interval_2.q
@@ -1,7 +1,7 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 drop table if exists vector_interval_2;
 create table vector_interval_2 (ts timestamp, dt date, str1 string, str2 string, str3 string, str4 string) stored as orc;
@@ -14,7 +14,7 @@ insert into vector_interval_2
 
 -- interval comparisons in select clause
 
-explain vectorization expression
+explain
 select
   str1,
   -- Should all be true
@@ -77,7 +77,7 @@ select
   interval '1-2' year to month != interval_year_month(str2)
 from vector_interval_2 order by str1;
 
-explain vectorization expression
+explain
 select
   str1,
   -- Should all be false
@@ -128,7 +128,7 @@ select
   interval '1-2' year to month != interval_year_month(str1)
 from vector_interval_2 order by str1;
 
-explain vectorization expression
+explain
 select
   str3,
   -- Should all be true
@@ -191,7 +191,7 @@ select
   interval '1 2:3:4' day to second != interval_day_time(str4)
 from vector_interval_2 order by str3;
 
-explain vectorization expression
+explain
 select
   str3,
   -- Should all be false
@@ -244,7 +244,7 @@ from vector_interval_2 order by str3;
 
 
 -- interval expressions in predicates
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   interval_year_month(str1) = interval_year_month(str1)
@@ -293,7 +293,7 @@ where
   and interval '1-3' year to month > interval_year_month(str1)
 order by ts;
 
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   interval_day_time(str3) = interval_day_time(str3)
@@ -342,7 +342,7 @@ where
   and interval '1 2:3:5' day to second > interval_day_time(str3)
 order by ts;
 
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   date '2002-03-01' = dt + interval_year_month(str1)
@@ -381,7 +381,7 @@ where
   and dt != dt + interval '1-2' year to month
 order by ts;
 
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   timestamp '2002-03-01 01:02:03' = ts + interval '1-2' year to month
@@ -431,7 +431,7 @@ where
 order by ts;
 
 -- day to second expressions in predicate
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = dt + interval '0 1:2:3' day to second
@@ -480,7 +480,7 @@ where
   and ts > dt - interval '0 1:2:4' day to second
 order by ts;
 
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = ts + interval '0' day

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_interval_arithmetic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_interval_arithmetic.q b/ql/src/test/queries/clientpositive/vector_interval_arithmetic.q
index 9a551e8..40c4c03 100644
--- a/ql/src/test/queries/clientpositive/vector_interval_arithmetic.q
+++ b/ql/src/test/queries/clientpositive/vector_interval_arithmetic.q
@@ -13,7 +13,7 @@ insert overwrite table interval_arithmetic_1
 SET hive.vectorized.execution.enabled=true;
 
 -- interval year-month arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '2-2' year to month,
@@ -36,7 +36,7 @@ select
 from interval_arithmetic_1
 order by dateval;
 
-explain vectorization expression
+explain
 select
   dateval,
   dateval - date '1999-06-07',
@@ -53,7 +53,7 @@ select
 from interval_arithmetic_1
 order by dateval;
 
-explain vectorization expression
+explain
 select
   tsval,
   tsval - interval '2-2' year to month,
@@ -76,7 +76,7 @@ select
 from interval_arithmetic_1
 order by tsval;
 
-explain vectorization expression
+explain
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
@@ -93,7 +93,7 @@ limit 2;
 
 
 -- interval day-time arithmetic
-explain vectorization expression
+explain
 select
   dateval,
   dateval - interval '99 11:22:33.123456789' day to second,
@@ -116,7 +116,7 @@ select
 from interval_arithmetic_1
 order by dateval;
 
-explain vectorization expression
+explain
 select
   dateval,
   tsval,
@@ -135,7 +135,7 @@ select
 from interval_arithmetic_1
 order by dateval;
 
-explain vectorization expression
+explain
 select
   tsval,
   tsval - interval '99 11:22:33.123456789' day to second,
@@ -158,7 +158,7 @@ select
 from interval_arithmetic_1
 order by tsval;
 
-explain vectorization expression
+explain
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q b/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q
index d27e67b..36ccd35 100644
--- a/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 create table vectortab_a_1k(
             t tinyint,
@@ -46,7 +45,7 @@ LOAD DATA LOCAL INPATH '../../data/files/vectortab_b_1k' OVERWRITE INTO TABLE ve
 
 CREATE TABLE vectortab_b_1korc STORED AS ORC AS SELECT * FROM vectortab_b_1k;
 
-explain vectorization expression
+explain
 select
    v1.s,
    v2.s,

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_join.q b/ql/src/test/queries/clientpositive/vector_join.q
index b086a13..9238a6e 100644
--- a/ql/src/test/queries/clientpositive/vector_join.q
+++ b/ql/src/test/queries/clientpositive/vector_join.q
@@ -32,7 +32,6 @@ TBLPROPERTIES (
 set hive.auto.convert.join=false;
 set hive.vectorized.execution.enabled = true;
 set hive.mapred.mode=nonstrict;
-set hive.fetch.task.conversion=none;
 SELECT cr.id1 ,
 cr.id2
 FROM

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_join30.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_join30.q b/ql/src/test/queries/clientpositive/vector_join30.q
index 6557a71..1467cd3 100644
--- a/ql/src/test/queries/clientpositive/vector_join30.q
+++ b/ql/src/test/queries/clientpositive/vector_join30.q
@@ -10,7 +10,7 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000;
 
 CREATE TABLE orcsrc STORED AS ORC AS SELECT * FROM src;
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -25,7 +25,7 @@ JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -40,7 +40,7 @@ LEFT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN
@@ -55,7 +55,7 @@ RIGHT OUTER JOIN
 ON (x.key = Y.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -76,7 +76,7 @@ JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 JOIN
@@ -97,7 +97,7 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -118,7 +118,7 @@ LEFT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 LEFT OUTER JOIN
@@ -139,7 +139,7 @@ RIGHT OUTER JOIN
 ON (x.key = Z.key)
 select sum(hash(Y.key,Y.value));
 
-explain vectorization expression
+explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x
 RIGHT OUTER JOIN

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_join_part_col_char.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_join_part_col_char.q b/ql/src/test/queries/clientpositive/vector_join_part_col_char.q
index e625a64..5cfce37 100644
--- a/ql/src/test/queries/clientpositive/vector_join_part_col_char.q
+++ b/ql/src/test/queries/clientpositive/vector_join_part_col_char.q
@@ -4,7 +4,6 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.vectorized.execution.enabled=true;
 set hive.explain.user=true;
 set hive.metastore.fastpath=false;
-set hive.fetch.task.conversion=none;
 
 drop table if exists char_part_tbl1 ;
 drop table if exists char_part_tbl2;
@@ -23,7 +22,7 @@ insert into table char_tbl2 partition(gpa='3') select name, age from studenttab
 show partitions char_tbl1;
 show partitions char_tbl2;
 
-explain vectorization select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa);
+explain select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa);
 select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa);
 
 set hive.vectorized.execution.enabled=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_left_outer_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join.q b/ql/src/test/queries/clientpositive/vector_left_outer_join.q
index 0684f12..ddf2660 100644
--- a/ql/src/test/queries/clientpositive/vector_left_outer_join.q
+++ b/ql/src/test/queries/clientpositive/vector_left_outer_join.q
@@ -3,9 +3,7 @@ set hive.explain.user=false;
 set hive.vectorized.execution.enabled=true;
 set hive.auto.convert.join=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
-set hive.fetch.task.conversion=none;
-
-explain vectorization 
+explain 
 select count(*) from (select c.ctinyint 
 from alltypesorc c
 left outer join alltypesorc cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
index ccceb36..5da5d50 100644
--- a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
+++ b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
@@ -20,14 +20,14 @@ INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE;
 
 set hive.vectorized.execution.enabled=false;
 set hive.mapjoin.hybridgrace.hashtable=false;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 set hive.vectorized.execution.enabled=false;
 set hive.mapjoin.hybridgrace.hashtable=true;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
@@ -36,7 +36,7 @@ select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left out
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
 SET hive.vectorized.execution.mapjoin.native.enabled=false;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
@@ -44,7 +44,7 @@ select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left out
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=false;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
@@ -52,7 +52,7 @@ select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left out
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
@@ -60,7 +60,7 @@ select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left out
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-explain vectorization expression
+explain
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
 
 select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
index 8469a06..dfb8405 100644
--- a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
@@ -26,502 +26,381 @@ select * from t4;
 set hive.vectorized.execution.enabled=false;
 set hive.mapjoin.hybridgrace.hashtable=false;
 
-explain vectorization only summary
-
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization only summary
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization only summary
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization only summary
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization only summary
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization only summary
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization only summary
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization only summary
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization only summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization only summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization only summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 
 set hive.vectorized.execution.enabled=false;
 set hive.mapjoin.hybridgrace.hashtable=true;
 
-explain vectorization summary
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization summary
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization summary
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization summary
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization summary
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization summary
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization summary
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization summary
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization summary
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
 SET hive.vectorized.execution.mapjoin.native.enabled=false;
 
-explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization only operator
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization only operator
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization only operator
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization only operator
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization only operator
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization only operator
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization only operator
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization only operator
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization only operator
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization only operator
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=false;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=false;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 
 set hive.vectorized.execution.enabled=true;
 set hive.mapjoin.hybridgrace.hashtable=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
 
-explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
 
-explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
 
-explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
 
-explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
 select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
  
-explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
 
-explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
 
-explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
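
For orientation: every query in the hunks above pivots on Hive's LEFT SEMI JOIN, which allows the right table only in the ON clause and only left-side columns in the select list. A minimal, hedged sketch of the semantics, reusing the t1/t2 tables these tests create:

    select a.key, a.value
    from t1 a
    left semi join t2 b on a.key = b.key;

    -- behaves like the IN-subquery form it rewrites:
    select a.key, a.value
    from t1 a
    where a.key in (select b.key from t2 b);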

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q b/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q
index d960559..c9e9e48 100644
--- a/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q
+++ b/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -10,7 +9,7 @@ set hive.fetch.task.conversion=none;
 -- Query copied from subquery_in.q
 
 -- non agg, non corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
@@ -24,7 +23,7 @@ where li.l_linenumber = 1 and
 ;
 
 -- non agg, corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
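
The "-" lines in this file, and throughout the mail, show the EXPLAIN grammar that HIVE-11394 introduced and this commit reverts. The variants exercised by these tests, with hedged glosses (verbosity increases roughly in this order, per the reverted patch):

    explain vectorization only operator <query>;    -- operator-level vectorization info, suppressing the regular plan
    explain vectorization expression <query>;       -- adds vector expression mappings
    explain vectorization detail <query>;           -- most verbose per-operator output
    explain vectorization detail formatted <query>; -- same detail, machine-oriented formatting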

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
index 22830a5..1f17669 100644
--- a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
+++ b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 create table date_dim
 (
@@ -105,7 +104,7 @@ stored as orc;
 -- For MR, we are verifying this query DOES NOT vectorize the Map vertex with
 -- the 2 TableScanOperators that have different schema.
 
-explain vectorization select
+explain select
         s_state, count(1)
  from store_sales,
  store,
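
Several of these test files also stop setting hive.fetch.task.conversion. For context, that property controls whether Hive short-circuits simple queries into a direct client-side fetch instead of compiling a job; a hedged summary of its values:

    set hive.fetch.task.conversion=none;     -- never convert; always compile and run a job
    set hive.fetch.task.conversion=minimal;  -- convert only SELECT *, partition-column filters, and LIMIT
    set hive.fetch.task.conversion=more;     -- additionally convert simple projections and filters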

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_multi_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_multi_insert.q b/ql/src/test/queries/clientpositive/vector_multi_insert.q
index c56ee1c..374a0da 100644
--- a/ql/src/test/queries/clientpositive/vector_multi_insert.q
+++ b/ql/src/test/queries/clientpositive/vector_multi_insert.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
+set hive.fetch.task.conversion=minimal;
 
 create table orc1
   stored as orc
@@ -22,7 +22,7 @@ create table orc_rn3 (rn int);
 
 analyze table orc1 compute statistics;
 
-explain vectorization from orc1 a
+explain from orc1 a
 insert overwrite table orc_rn1 select a.* where a.rn < 100
 insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000
 insert overwrite table orc_rn3 select a.* where a.rn >= 1000;
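
The FROM-first statement above is Hive's multi-insert form: the source table is scanned once and every INSERT clause applies its own filter and target. A minimal sketch with hypothetical table names (src, small_rows, big_rows are illustrative only):

    from src s
    insert overwrite table small_rows select s.* where s.rn < 100
    insert overwrite table big_rows   select s.* where s.rn >= 100;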

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q b/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
index 113ea7f..69142bf 100644
--- a/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
+++ b/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
@@ -1,4 +1,4 @@
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
-explain vectorization SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint);
\ No newline at end of file
+explain SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint);
\ No newline at end of file
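
The IN list in this test holds columns, not literals, which is what the file name refers to. A column-valued IN is just a disjunction of equality tests, so the query is equivalent to the rewrite below; vectorized IN expressions have historically required constant lists, which is presumably what this test probes:

    SELECT * FROM alltypesorc
    WHERE cint = ctinyint OR cint = cbigint;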

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_non_string_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_non_string_partition.q b/ql/src/test/queries/clientpositive/vector_non_string_partition.q
index cf3e765..bffc93e 100644
--- a/ql/src/test/queries/clientpositive/vector_non_string_partition.q
+++ b/ql/src/test/queries/clientpositive/vector_non_string_partition.q
@@ -4,17 +4,16 @@ SET hive.vectorized.execution.enabled=true;
 CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC;
 SET hive.exec.dynamic.partition.mode=nonstrict;
 SET hive.exec.dynamic.partition=true;
-set hive.fetch.task.conversion=none;
 
 INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint fROM alltypesorc 
 WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble;
 
 SHOW PARTITIONS non_string_part;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;
+EXPLAIN SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;
 
 SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10;
+EXPLAIN SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10;
 
 SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_null_projection.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_null_projection.q b/ql/src/test/queries/clientpositive/vector_null_projection.q
index 711b8e7..66c0838 100644
--- a/ql/src/test/queries/clientpositive/vector_null_projection.q
+++ b/ql/src/test/queries/clientpositive/vector_null_projection.q
@@ -10,12 +10,12 @@ insert into table a values('aaa');
 insert into table b values('aaa');
 
 -- We expect no vectorization due to NULL (void) projection type.
-explain vectorization expression
+explain
 select NULL from a;
 
 select NULL from a;
 
-explain vectorization expression
+explain
 select NULL as x from a union distinct select NULL as x from b;
 
 select NULL as x from a union distinct select NULL as x from b;
\ No newline at end of file
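
Per the comment retained in the diff, a bare NULL projects Hive's void type, which the vectorizer does not handle. A typed NULL sidesteps that; the cast below is an illustrative workaround, not part of the test:

    select NULL from a;                  -- projects void; not vectorizable
    select cast(NULL as string) from a;  -- typed NULL; string is an arbitrary choice here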

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_nullsafe_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_nullsafe_join.q b/ql/src/test/queries/clientpositive/vector_nullsafe_join.q
index 6a7ff72..b316a54 100644
--- a/ql/src/test/queries/clientpositive/vector_nullsafe_join.q
+++ b/ql/src/test/queries/clientpositive/vector_nullsafe_join.q
@@ -1,7 +1,6 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 --
@@ -21,19 +20,19 @@ CREATE TABLE myinput1 STORED AS ORC AS SELECT * FROM myinput1_txt;
 SET hive.vectorized.execution.mapjoin.native.enabled=false;
 
 -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
 select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
 select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
 
 -- outer joins
@@ -48,19 +47,19 @@ SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
 
 -- merging
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
 select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
 select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
 
-explain vectorization expression select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
 select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
 
 -- outer joins
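
All of these joins exercise Hive's NULL-safe equality operator. Unlike =, which yields NULL (and therefore no match) when either operand is NULL, <=> treats two NULLs as equal:

    -- ON a.key = b.value   : rows with a NULL key or value never match
    -- ON a.key <=> b.value : NULL <=> NULL counts as a match
    select * from myinput1 a join myinput1 b on a.key <=> b.value;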

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_number_compare_projection.q b/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
index 3f4f5aa..feb5e98 100644
--- a/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
+++ b/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
@@ -35,7 +35,7 @@ SET hive.vectorized.execution.enabled=true;
 --
 -- Projection LongCol<Compare>LongScalar
 --
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, (t < 0) as compare1, (si <= 0) as compare2, (i = 0) as compare3 from vectortab2k_orc
         order by t, si, i) as q;
@@ -44,7 +44,7 @@ SELECT sum(hash(*)) FROM
     (SELECT t, si, i, (t < 0) as compare1, (si <= 0) as compare2, (i = 0) as compare3 from vectortab2k_orc
         order by t, si, i) as q;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN
 SELECT sum(hash(*)) FROM
     (SELECT t, si, i, b, (t > 0) as compare1, (si >= 0) as compare2, (i != 0) as compare3, (b > 0) as compare4 from vectortab2k_orc
         order by t, si, i, b) as q;
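
Two test idioms here are worth spelling out: the comparisons are projected as boolean columns rather than used as filters, which is what drives the Projection LongCol<Compare>LongScalar expressions named in the comment, and sum(hash(*)) folds the whole result into one checksum row so the output stays compact and order-insensitive:

    SELECT sum(hash(*)) FROM
        (SELECT t, (t < 0) AS compare1 FROM vectortab2k_orc ORDER BY t) AS q;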

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_nvl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_nvl.q b/ql/src/test/queries/clientpositive/vector_nvl.q
index e00e82f..742bf52 100644
--- a/ql/src/test/queries/clientpositive/vector_nvl.q
+++ b/ql/src/test/queries/clientpositive/vector_nvl.q
@@ -1,8 +1,7 @@
 SET hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT cdouble, nvl(cdouble, 100) as n
+EXPLAIN SELECT cdouble, nvl(cdouble, 100) as n
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 LIMIT 10;
@@ -12,7 +11,7 @@ FROM alltypesorc
 WHERE (cdouble IS NULL)
 LIMIT 10;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT cfloat, nvl(cfloat, 1) as n
+EXPLAIN SELECT cfloat, nvl(cfloat, 1) as n
 FROM alltypesorc
 LIMIT 10;
 
@@ -20,7 +19,7 @@ SELECT cfloat, nvl(cfloat, 1) as n
 FROM alltypesorc
 LIMIT 10;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, 10) as n
+EXPLAIN SELECT nvl(null, 10) as n
 FROM alltypesorc
 LIMIT 10;
 
@@ -28,7 +27,7 @@ SELECT nvl(null, 10) as n
 FROM alltypesorc
 LIMIT 10;
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, null) as n
+EXPLAIN SELECT nvl(null, null) as n
 FROM alltypesorc
 LIMIT 10;
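
For readers unfamiliar with the function under test: nvl(x, d) returns d when x is NULL and x otherwise, i.e. a two-argument coalesce:

    select cdouble, nvl(cdouble, 100) from alltypesorc;  -- yields 100 wherever cdouble is NULL
    -- equivalent: select cdouble, coalesce(cdouble, 100) from alltypesorc;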
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_orderby_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_orderby_5.q b/ql/src/test/queries/clientpositive/vector_orderby_5.q
index 17ccf82..30bcaef 100644
--- a/ql/src/test/queries/clientpositive/vector_orderby_5.q
+++ b/ql/src/test/queries/clientpositive/vector_orderby_5.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
-set hive.fetch.task.conversion=none;
 
 create table vectortab2k(
             t tinyint,
@@ -40,7 +39,7 @@ STORED AS ORC;
 
 INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
 
-explain vectorization expression
+explain
 select bo, max(b) from vectortab2korc group by bo order by bo desc;
 
 select bo, max(b) from vectortab2korc group by bo order by bo desc;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join0.q b/ql/src/test/queries/clientpositive/vector_outer_join0.q
index d7586c7..dce3a1b 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join0.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join0.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
-set hive.fetch.task.conversion=none;
 
 CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC;
 CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC; 
@@ -13,14 +12,14 @@ insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null,
 select * from orc_table_1;
 select * from orc_table_2;
 
-explain vectorization detail
+explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c;
 
 -- SORT_QUERY_RESULTS
 
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c;
 
-explain vectorization detail
+explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join1.q b/ql/src/test/queries/clientpositive/vector_outer_join1.q
index 6cb2e45..4a36452 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join1.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join1.q
@@ -2,7 +2,6 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- Using cint and ctinyint in test queries
 create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5;
@@ -29,7 +28,7 @@ ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS;
 
 select * from small_alltypesorc_a;
 
-explain vectorization detail
+explain
 select * 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -42,7 +41,7 @@ from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
   on cd.cint = c.cint;
 
-explain vectorization detail
+explain
 select c.ctinyint 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
@@ -55,7 +54,7 @@ from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint;
 
-explain vectorization detail
+explain
 select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join2.q b/ql/src/test/queries/clientpositive/vector_outer_join2.q
index da17806..d3b5805 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join2.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join2.q
@@ -2,7 +2,6 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- Using cint and cbigint in test queries
 create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5;
@@ -29,7 +28,7 @@ ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS;
 
 select * from small_alltypesorc_a;
 
-explain vectorization detail
+explain
 select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join3.q b/ql/src/test/queries/clientpositive/vector_outer_join3.q
index 3f28251..e5fc0a9 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join3.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join3.q
@@ -2,7 +2,6 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- Using cint and cstring1 in test queries
 create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5;
@@ -28,7 +27,7 @@ ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS;
 ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS;
 
 select * from small_alltypesorc_a;
-explain vectorization detail formatted
+explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -48,7 +47,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -68,7 +67,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select c.cstring1 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join4.q b/ql/src/test/queries/clientpositive/vector_outer_join4.q
index d024687..45461b5 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join4.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join4.q
@@ -2,7 +2,6 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 SET hive.auto.convert.join=true;
 SET hive.vectorized.execution.mapjoin.native.enabled=true;
-set hive.fetch.task.conversion=none;
 
 -- Using cint and ctinyint in test queries
 create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10;
@@ -29,7 +28,7 @@ ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS;
 
 select * from small_alltypesorc_b;
 
-explain vectorization detail formatted
+explain
 select * 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
@@ -42,7 +41,7 @@ from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd
   on cd.cint = c.cint;
 
-explain vectorization detail formatted
+explain
 select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
@@ -55,7 +54,7 @@ from small_alltypesorc_b c
 left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select c.ctinyint 
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/queries/clientpositive/vector_outer_join5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_outer_join5.q b/ql/src/test/queries/clientpositive/vector_outer_join5.q
index b8e788a..18b9ab4 100644
--- a/ql/src/test/queries/clientpositive/vector_outer_join5.q
+++ b/ql/src/test/queries/clientpositive/vector_outer_join5.q
@@ -5,7 +5,6 @@ SET hive.vectorized.execution.mapjoin.native.enabled=true;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
-set hive.fetch.task.conversion=none;
 
 -- SORT_QUERY_RESULTS
 
@@ -23,7 +22,7 @@ as orc as select ctinyint, cbigint from alltypesorc limit 100;
 ANALYZE TABLE small_table COMPUTE STATISTICS;
 ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
@@ -36,7 +35,7 @@ left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -49,7 +48,7 @@ left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -62,7 +61,7 @@ left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -75,7 +74,7 @@ left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -106,7 +105,7 @@ as orc as select pmod(ctinyint, 16) as cmodtinyint, cbigint from alltypesorc lim
 ANALYZE TABLE small_table2 COMPUTE STATISTICS;
 ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
@@ -119,7 +118,7 @@ left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -132,7 +131,7 @@ left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -145,7 +144,7 @@ left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -158,7 +157,7 @@ left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1;
 
-explain vectorization detail formatted
+explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
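
vector_outer_join5.q pins the automatic map-join settings shown at the top of its diff. A hedged gloss of what they do:

    set hive.auto.convert.join=true;                         -- consider rewriting common joins as map joins
    set hive.auto.convert.join.noconditionaltask=true;       -- convert directly, without a conditional backup task
    set hive.auto.convert.join.noconditionaltask.size=10000; -- only when the small tables' total size fits under ~10 KB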


http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
index 1d4163c..437770d 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
@@ -87,73 +87,25 @@ POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SIMPLE [(valu
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_permute_select
-                  Statistics: Num rows: 2 Data size: 33 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=16)
+            default@part_add_int_permute_select,part_add_int_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_permute_select
@@ -254,73 +206,25 @@ POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c EXPRES
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_string_permute_select
-                  Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, d:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=19)
+            default@part_add_int_string_permute_select,part_add_int_string_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting works right
 select insert_num,part,a,b from part_add_int_string_permute_select
@@ -483,73 +387,25 @@ POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	double1	double1	double1	_c4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 284 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=5 width=56)
+            default@part_change_string_group_double,part_change_string_group_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
@@ -649,73 +505,25 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_date_group_string_group_date_timestamp
-                  Statistics: Num rows: 6 Data size: 926 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), c6 (type: string), c7 (type: char(50)), c8 (type: char(15)), c9 (type: varchar(50)), c10 (type: varchar(15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:string, c2:char(50), c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), c8:char(15), c9:varchar(50), c10:varchar(15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=154)
+            default@part_change_date_group_string_group_date_timestamp,part_change_date_group_string_group_date_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
@@ -892,73 +700,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19	_col20	_col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 6 Data size: 918 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22"]
+          TableScan [TS_0] (rows=6 width=153)
+            default@part_change_numeric_group_string_group_multi_ints_string_group,part_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1117,73 +877,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 6 Data size: 1386 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
+          TableScan [TS_0] (rows=6 width=231)
+            default@part_change_numeric_group_string_group_floating_string_group,part_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
@@ -1330,73 +1042,25 @@ POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_string_group_string
-                  Statistics: Num rows: 6 Data size: 421 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 (type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:char(50), c2:char(9), c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, c8:char(50), c9:char(9), c10:string, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=70)
+            default@part_change_string_group_string_group_string,part_change_string_group_string_group_string,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string
 PREHOOK: type: QUERY
@@ -1577,73 +1241,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint P
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4	_col5	_col6	_col7	_col8	_col9	_col10	_col11	_col12	_col13	_col14	_col15	_col16	_col17	_col18	_col19
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint
-                  Statistics: Num rows: 6 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 (type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 (type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 20
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    dataColumns: insert_num:int, c1:smallint, c2:int, c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
+          TableScan [TS_0] (rows=6 width=143)
+            default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint,part_change_lower_to_higher_numeric_group_tinyint_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
@@ -1754,73 +1370,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PA
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0	_col1	_col2	_col3	_col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_lower_to_higher_numeric_group_decimal_to_float
-                  Statistics: Num rows: 6 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:float, c2:double, c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=6 width=71)
+            default@part_change_lower_to_higher_numeric_group_decimal_to_float,part_change_lower_to_higher_numeric_group_decimal_to_float,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY

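Every hunk in the file above follows the same pattern: golden output produced by "explain vectorization detail", with its PLAN VECTORIZATION header and per-operator annotations, gives way to the compact plain "explain" rendering. A minimal sketch of how the removed output was requested, reusing a table from the hunks above; the session property name is taken from the removed enabledConditionsMet lines, and the plan text a given build prints is illustrative only:

    SET hive.vectorized.execution.enabled=true;

    EXPLAIN VECTORIZATION DETAIL
    SELECT insert_num, part, c1, c2, c3, b
    FROM part_change_string_group_double;

With the patch applied, the DETAIL level adds the PLAN VECTORIZATION, Map Vectorization, and rowBatchContext sections seen in the removed (-) lines; after this revert, presumably only the plain EXPLAIN form remains.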
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
index aa09b53..e35222b 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
@@ -153,55 +153,25 @@ POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).insert_num SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).s1 SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:s1, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct1_c_txt.insert_num	complex_struct1_c_txt.s1	complex_struct1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_struct1
-                  Statistics: Num rows: 6 Data size: 931 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), s1 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s1] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=6 width=155)
+            default@part_change_various_various_struct1,part_change_various_various_struct1,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","s1","b"]
 
 PREHOOK: query: select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
@@ -447,55 +417,25 @@ POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).b SIMPLE [
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).insert_num SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).s2 SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:s2, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct2_d_txt.insert_num	complex_struct2_d_txt.b	complex_struct2_d_txt.s2
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_various_various_struct2
-                  Statistics: Num rows: 8 Data size: 939 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s2 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string> of Column[s2] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=8 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=8 width=117)
+            default@part_add_various_various_struct2,part_add_various_various_struct2,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s2"]
 
 PREHOOK: query: select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
@@ -665,55 +605,25 @@ POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).insert_num SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).s3 SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:s3, type:struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>, comment:null), ]
 complex_struct4_c_txt.insert_num	complex_struct4_c_txt.b	complex_struct4_c_txt.s3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_to_various_various_struct4
-                  Statistics: Num rows: 4 Data size: 353 Basic stats: COMPLETE Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b (type: string), s3 (type: struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary> of Column[s3] not supported
-                vectorized: false
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=4 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=4 width=88)
+            default@part_add_to_various_various_struct4,part_add_to_various_various_struct4,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s3"]
 
 PREHOOK: query: select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY

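In the complex-type file above, the maps run in row mode, and the removed annotations carried the diagnosis: notVectorizedReason reports that struct-typed columns are not supported by the vectorized SELECT operator, so the vertex stays vectorized: false even with vectorization enabled. A hedged sketch of surfacing that diagnosis, reusing a table from the hunks above; the annotated lines in the comment are quoted (abridged) from the removed output:

    EXPLAIN VECTORIZATION DETAIL
    SELECT insert_num, part, s1, b
    FROM part_change_various_various_struct1;

    -- Under the removed Map Vectorization section this printed:
    --   enabled: true
    --   notVectorizedReason: Select expression for SELECT operator:
    --       Data type struct<c1:string,...,c13:string> of Column[s1] not supported
    --   vectorized: false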
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
index e8b5c71..fb38687 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
@@ -282,73 +282,25 @@ POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	boolean1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	smallint1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	int1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	bigint1	_c54
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_boolean_to_bigint
-                  Statistics: Num rows: 10 Data size: 4707 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 (type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c47 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 55, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 55
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    dataColumns: insert_num:int, c1:boolean, c2:boolean, c3:boolean, c4:boolean, c5:boolean, c6:boolean, c7:boolean, c8:boolean, c9:boolean, c10:tinyint, c11:tinyint, c12:tinyint, c13:tinyint, c14:tinyint, c15:tinyint, c16:tinyint, c17:tinyint, c18:tinyint, c19:tinyint, c20:tinyint, c21:smallint, c22:smallint, c23:smallint, c24:smallint, c25:smallint, c26:smallint, c27:smallint, c28:smallint, c29:smallint, c30:smallint, c31:smallint, c32:int, c33:int, c34:int, c35:int, c36:int, c37:int, c38:int, c39:int, c40:int, c41:int, c42:int, c43:bigint, c44:bigint, c45:bigint, c46:bigint, c47:bigint, c48:bigint, c49:bigint, c50:bigint, c51:bigint, c52:bigint, c53:bigint, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=10 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
+          TableScan [TS_0] (rows=10 width=470)
+            default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -545,73 +497,25 @@ POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	decimal1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	float1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	double1	_c34
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_decimal_to_double
-                  Statistics: Num rows: 6 Data size: 2551 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 (type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 (type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 (type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 (type: double), c33 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 35, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 35
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    dataColumns: insert_num:int, c1:decimal(38,18), c2:decimal(38,18), c3:decimal(38,18), c4:decimal(38,18), c5:decimal(38,18), c6:decimal(38,18), c7:decimal(38,18), c8:decimal(38,18), c9:decimal(38,18), c10:decimal(38,18), c11:decimal(38,18), c12:float, c13:float, c14:float, c15:float, c16:float, c17:float, c18:float, c19:float, c20:float, c21:float, c22:float, c23:double, c24:double, c25:double, c26:double, c27:double, c28:double, c29:double, c30:double, c31:double, c32:double, c33:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
+          TableScan [TS_0] (rows=6 width=425)
+            default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -724,73 +628,25 @@ POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	_c13
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_timestamp
-                  Statistics: Num rows: 6 Data size: 870 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: timestamp), c12 (type: timestamp), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 14, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 14
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                    dataColumns: insert_num:int, c1:timestamp, c2:timestamp, c3:timestamp, c4:timestamp, c5:timestamp, c6:timestamp, c7:timestamp, c8:timestamp, c9:timestamp, c10:timestamp, c11:timestamp, c12:timestamp, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
+          TableScan [TS_0] (rows=6 width=145)
+            default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -887,73 +743,25 @@ POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num	date1	date1	date1	date1	_c5
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_date
-                  Statistics: Num rows: 6 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 6, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 6
-                    includeColumns: [0, 1, 2, 3, 4, 5]
-                    dataColumns: insert_num:int, c1:date, c2:date, c3:date, c4:date, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+          TableScan [TS_0] (rows=6 width=62)
+            default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
@@ -1131,73 +939,25 @@ POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).insert_num SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 same_type1_c_txt.insert_num	same_type1_c_txt.c1	same_type1_c_txt.c2	same_type1_c_txt.c3	same_type1_c_txt.c4	same_type1_c_txt.c5	same_type1_c_txt.c6	same_type1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_same_type_different_params
-                  Statistics: Num rows: 13 Data size: 1311 Basic stats: COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 (type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 1, 2, 3, 4, 5, 6, 7]
-                    Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 8
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                    dataColumns: insert_num:int, c1:char(8), c2:char(32), c3:varchar(15), c4:varchar(18), c5:decimal(10,2), c6:decimal(25,15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=13 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+          TableScan [TS_0] (rows=13 width=100)
+            default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params
 PREHOOK: type: QUERY
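
All four hunks above follow the same pattern: the q-file query drops the reverted
"vectorization detail" modifier in favor of plain "explain", and the expected output
collapses from the full STAGE PLANS listing (TableScan Vectorization, Select
Vectorization, Map Vectorization, rowBatchContext) to the condensed CBO rendering.
For reference, a minimal sketch of the two forms, taken verbatim from the hunks for
part_change_various_various_date; the first parses only while HIVE-11394 is applied:

-- reverted form (available only with HIVE-11394)
explain vectorization detail
select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;

-- surviving form, whose condensed CBO output appears in the hunks above
explain
select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;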


[17/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join6.q.out
index 9a95606..9369661 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join6.q.out
@@ -126,15 +126,113 @@ POSTHOOK: Output: default@TJOIN4
 POSTHOOK: Lineage: tjoin4.c1 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c1, type:int, comment:null), ]
 POSTHOOK: Lineage: tjoin4.c2 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
 POSTHOOK: Lineage: tjoin4.rnum SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:rnum, type:int, comment:null), ]
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: rnum (type: int), c1 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col1 (type: int)
+                      outputColumnNames: _col0, _col2, _col3
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        Map Join Operator
+                          condition map:
+                               Left Outer Join0 to 1
+                          keys:
+                            0 _col2 (type: int)
+                            1 _col1 (type: int)
+                          outputColumnNames: _col0, _col1, _col3
+                          input vertices:
+                            1 Map 3
+                          Statistics: Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col0 (type: int), _col1 (type: int), _col3 (type: int)
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              Statistics: Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE
+                              table:
+                                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: rnum (type: int), c1 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin3
+                  Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: rnum (type: int), c1 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 PREHOOK: type: QUERY
@@ -153,15 +251,108 @@ POSTHOOK: Input: default@tjoin3
 0	3	0
 1	NULL	NULL
 2	NULL	NULL
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select tj1rnum, tj2rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select tj1rnum, tj2rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: rnum (type: int), c1 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col1 (type: int)
+                      outputColumnNames: _col0, _col2, _col3
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        Map Join Operator
+                          condition map:
+                               Left Outer Join0 to 1
+                          keys:
+                            0 _col2 (type: int)
+                            1 _col0 (type: int)
+                          outputColumnNames: _col0, _col1
+                          input vertices:
+                            1 Map 3
+                          Statistics: Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: rnum (type: int), c1 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin3
+                  Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: c1 (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select tj1rnum, tj2rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
 PREHOOK: type: QUERY
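
In vector_outer_join6.q.out the revert runs in the opposite direction: "explain
vectorization detail formatted" becomes plain "explain", so the expected output
regains the full Tez plan, with Map 1 fed by BROADCAST_EDGEs from Map 2 and Map 3
and both left outer joins converted to Map Join Operators. A hedged sketch of a
session that yields this plan shape; the engine and vectorization flags are the
ones named in the native-condition lines elsewhere in this diff, and the auto
map-join setting is an assumption:

-- assumed session settings; the query is verbatim from the q-file
set hive.execution.engine=tez;
set hive.vectorized.execution.enabled=true;
set hive.auto.convert.join=true;  -- assumed: needed for the Map Join Operators
explain
select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
   (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1;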

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
index 38c55d6..ebe895f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
@@ -76,16 +76,12 @@ POSTHOOK: Lineage: inventory_part_0 PARTITION(par=2).inv_date_sk SIMPLE [(invent
 POSTHOOK: Lineage: inventory_part_0 PARTITION(par=2).inv_item_sk SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_item_sk, type:int, comment:null), ]
 POSTHOOK: Lineage: inventory_part_0 PARTITION(par=2).inv_quantity_on_hand SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_quantity_on_hand, type:int, comment:null), ]
 POSTHOOK: Lineage: inventory_part_0 PARTITION(par=2).inv_warehouse_sk SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_warehouse_sk, type:int, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -103,73 +99,31 @@ STAGE PLANS:
                 TableScan
                   alias: inventory_part_0
                   Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
                     outputColumnNames: inv_quantity_on_hand
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(inv_quantity_on_hand)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -250,16 +204,12 @@ POSTHOOK: Lineage: inventory_part_1 PARTITION(par=5cols).inv_date_sk SIMPLE [(in
 POSTHOOK: Lineage: inventory_part_1 PARTITION(par=5cols).inv_item_sk SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_item_sk, type:int, comment:null), ]
 POSTHOOK: Lineage: inventory_part_1 PARTITION(par=5cols).inv_quantity_on_hand SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_quantity_on_hand, type:int, comment:null), ]
 POSTHOOK: Lineage: inventory_part_1 PARTITION(par=5cols).inv_warehouse_sk SIMPLE [(inventory_txt)inventory_txt.FieldSchema(name:inv_warehouse_sk, type:int, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -277,73 +227,31 @@ STAGE PLANS:
                 TableScan
                   alias: inventory_part_1
                   Statistics: Num rows: 200 Data size: 13476 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
                     outputColumnNames: inv_quantity_on_hand
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 200 Data size: 13476 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(inv_quantity_on_hand)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -426,16 +334,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@inventory_part_2a
 POSTHOOK: Input: default@inventory_part_2a@par=2
 POSTHOOK: Output: default@inventory_part_2a@par=2
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_2a
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_2a
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -453,73 +357,31 @@ STAGE PLANS:
                 TableScan
                   alias: inventory_part_2a
                   Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
                     outputColumnNames: inv_quantity_on_hand
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(inv_quantity_on_hand)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -583,16 +445,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@inventory_part_2b
 POSTHOOK: Input: default@inventory_part_2b@par1=2/par2=3
 POSTHOOK: Output: default@inventory_part_2b@par1=2/par2=3
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_2b
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_2b
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -610,73 +468,31 @@ STAGE PLANS:
                 TableScan
                   alias: inventory_part_2b
                   Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
                     outputColumnNames: inv_quantity_on_hand
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(inv_quantity_on_hand)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -746,16 +562,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
 POSTHOOK: Input: default@inventory_part_3
 POSTHOOK: Input: default@inventory_part_3@par=2
 POSTHOOK: Output: default@inventory_part_3@par=2
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select sum(inv_quantity_on_hand) from inventory_part_3
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -773,73 +585,31 @@ STAGE PLANS:
                 TableScan
                   alias: inventory_part_3
                   Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
                     outputColumnNames: inv_quantity_on_hand
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 200 Data size: 4776 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(inv_quantity_on_hand)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0]
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
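
The inventory_part_* cases above all run the same sum(inv_quantity_on_hand)
aggregate; what varies is the partition-level schema, visible in the removed
rowBatchContext blocks (5 data columns for inventory_part_0 against 6 for
inventory_part_1) and in the ALTERTABLE_RENAMECOL hooks for inventory_part_2a,
inventory_part_2b, and inventory_part_3. A hedged sketch of how a table with
differing per-partition column counts arises; the four base column names and the
par=5cols partition come from the lineage entries above, while the exact DDL, the
first partition value, and the added column are assumptions:

-- assumed DDL; only the column names are taken from the lineage above
create table inventory_part_1 (
  inv_date_sk int, inv_item_sk int,
  inv_warehouse_sk int, inv_quantity_on_hand int)
partitioned by (par string) stored as orc;

insert into inventory_part_1 partition (par='4cols')  -- assumed partition value
  select inv_date_sk, inv_item_sk, inv_warehouse_sk, inv_quantity_on_hand
  from inventory_txt;

alter table inventory_part_1 add columns (inv_note string);  -- hypothetical column

insert into inventory_part_1 partition (par='5cols')
  select inv_date_sk, inv_item_sk, inv_warehouse_sk, inv_quantity_on_hand,
         cast(null as string)
  from inventory_txt;

select sum(inv_quantity_on_hand) from inventory_part_1;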


[54/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
index 71e470b..87d4c70 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
@@ -434,7 +434,14 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: ctinyint (type: tinyint), cdouble (type: double)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 0
+              ListSink
 
 PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out
index eb2a692..cebbcdf 100644
--- a/ql/src/test/results/clientpositive/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out
@@ -417,7 +417,17 @@ STAGE PLANS:
     Fetch Operator
       limit: 0
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ctinyint (type: tinyint), cdouble (type: double)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 0
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 PREHOOK: type: QUERY
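
Both vectorization_limit hunks above record the same planner change from the Calcite 1.10 upgrade: for an ORDER BY ... LIMIT 0 query, the fetch task no longer collapses to a bare ListSink; it now keeps the full TableScan, Select Operator, and Limit tree, with "Number of rows: 0", ahead of the ListSink. The result set is empty either way; only the displayed plan differs. The statement under test, with the select taken verbatim from the diff context:

    -- still returns zero rows; the Limit operator is now explicit in the
    -- fetch task's processor tree
    explain
    select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;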


[10/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
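
The vectorized_dynamic_partition_pruning diff below applies the same EXPLAIN VECTORIZATION revert to the dynamic partition pruning tests. Each query joins the partitioned srcpart table to a small derived table (srcpart_date, srcpart_hour, srcpart_date_hour, srcpart_double_hour) on the partition columns ds/hr, so the join side can prune srcpart partitions at runtime, visible in the surviving plan lines as "Target Vertex: Map 1". A minimal sketch of the first such case; the SET line is an assumption (the usual Tez-side switch, which this diff never shows):

    -- hive.tez.dynamic.partition.pruning is assumed here; it does not
    -- appear anywhere in the diff itself
    SET hive.tez.dynamic.partition.pruning=true;
    EXPLAIN
    select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds)
    where srcpart_date.`date` = '2008-04-08';
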
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index c2e1dfd..76c8404 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -34,14 +34,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 11
 12
-PREHOOK: query: EXPLAIN VECTORIZATION create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
 PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: EXPLAIN VECTORIZATION create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
 POSTHOOK: type: CREATETABLE_AS_SELECT
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -78,19 +74,8 @@ STAGE PLANS:
                         Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -214,15 +199,11 @@ POSTHOOK: Output: default@srcpart_double_hour
 POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -253,10 +234,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -292,14 +269,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -321,13 +290,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -367,14 +329,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_date
 #### A masked pattern was here ####
 1000
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -405,10 +363,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -429,14 +383,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -458,13 +404,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -518,17 +457,13 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -560,10 +495,6 @@ STAGE PLANS:
                       value expressions: _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -599,14 +530,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -642,14 +565,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -687,13 +602,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -737,16 +645,12 @@ POSTHOOK: Input: default@srcpart_date
 POSTHOOK: Input: default@srcpart_hour
 #### A masked pattern was here ####
 500
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -779,10 +683,6 @@ STAGE PLANS:
                       value expressions: _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -803,14 +703,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -831,14 +723,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -876,13 +760,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -938,15 +815,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 500
 PREHOOK: query: -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -976,10 +849,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1030,14 +899,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1059,13 +920,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1105,14 +959,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_date_hour
 #### A masked pattern was here ####
 500
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1143,10 +993,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1167,14 +1013,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1196,13 +1034,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1254,15 +1085,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 500
 PREHOOK: query: -- empty set
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- empty set
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1293,10 +1120,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1332,14 +1155,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1361,13 +1176,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1407,14 +1215,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_date
 #### A masked pattern was here ####
 0
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1445,10 +1249,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1469,14 +1269,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1498,13 +1290,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1554,15 +1339,11 @@ POSTHOOK: Input: default@srcpart
 #### A masked pattern was here ####
 0
 PREHOOK: query: -- expressions
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- expressions
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1593,10 +1374,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1632,14 +1409,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1661,13 +1430,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1707,14 +1469,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_double_hour
 #### A masked pattern was here ####
 1000
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1745,10 +1503,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1784,14 +1538,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1813,13 +1559,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1859,14 +1598,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_double_hour
 #### A masked pattern was here ####
 1000
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1897,10 +1632,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1921,14 +1652,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1950,13 +1673,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1996,14 +1712,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Input: default@srcpart_double_hour
 #### A masked pattern was here ####
 1000
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2034,10 +1746,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2058,14 +1766,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2087,13 +1787,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2146,14 +1839,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2184,10 +1873,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2223,14 +1908,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2252,13 +1929,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2313,15 +1983,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 1000
 Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
-EXPLAIN VECTORIZATION select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- parent is reduce tasks
-EXPLAIN VECTORIZATION select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2349,10 +2015,6 @@ STAGE PLANS:
                       Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2373,10 +2035,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2398,13 +2056,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2420,13 +2071,6 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -2474,15 +2118,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 1000
 Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- non-equi join
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2511,10 +2151,6 @@ STAGE PLANS:
                       value expressions: _col0 (type: string), _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2534,14 +2170,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string), _col2 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2569,13 +2197,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2617,15 +2238,11 @@ POSTHOOK: Input: default@srcpart_date_hour
 #### A masked pattern was here ####
 1500
 PREHOOK: query: -- old style join syntax
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
 PREHOOK: type: QUERY
 POSTHOOK: query: -- old style join syntax
-EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2655,10 +2272,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2709,14 +2322,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2738,13 +2343,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2785,15 +2383,11 @@ POSTHOOK: Input: default@srcpart_date_hour
 #### A masked pattern was here ####
 500
 PREHOOK: query: -- left join
-EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- left join
-EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2824,10 +2418,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2863,14 +2453,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -2892,13 +2474,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2919,14 +2494,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2975,14 +2546,6 @@ STAGE PLANS:
                             Target Vertex: Map 4
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -2999,10 +2562,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3024,13 +2583,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3052,15 +2604,11 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: -- full outer
-EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- full outer
-EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3090,10 +2638,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -3129,14 +2673,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3158,13 +2694,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3186,17 +2715,13 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: -- with static pruning
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- with static pruning
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3228,10 +2753,6 @@ STAGE PLANS:
                       value expressions: _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -3267,14 +2788,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -3295,14 +2808,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3340,13 +2845,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3386,16 +2884,12 @@ POSTHOOK: Input: default@srcpart_date
 POSTHOOK: Input: default@srcpart_hour
 #### A masked pattern was here ####
 500
-PREHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3431,12 +2925,6 @@ STAGE PLANS:
                         value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: unknown
-            Map Vectorization:
-                enabled: true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -3457,14 +2945,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -3485,14 +2965,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3530,13 +3002,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3573,15 +3038,11 @@ POSTHOOK: Input: default@srcpart_hour
 #### A masked pattern was here ####
 0
 PREHOOK: query: -- union + subquery
-EXPLAIN VECTORIZATION select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- union + subquery
-EXPLAIN VECTORIZATION select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3614,10 +3075,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -3638,10 +3095,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -3662,10 +3115,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3687,13 +3136,6 @@ STAGE PLANS:
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3709,13 +3151,6 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3752,13 +3187,6 @@ STAGE PLANS:
                           Target Vertex: Map 1
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3819,14 +3247,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 2000
-PREHOOK: query: EXPLAIN VECTORIZATION select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+PREHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+POSTHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -3859,10 +3283,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -3883,10 +3303,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -3907,10 +3323,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -3934,13 +3346,6 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3956,13 +3361,6 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3999,13 +3397,6 @@ STAGE PLANS:
                           Target Vertex: Map 1
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -4067,14 +3458,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 2008-04-08
 2008-04-09
-PREHOOK: query: EXPLAIN VECTORIZATION select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+PREHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
+POSTHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4109,10 +3496,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 10 
             Map Operator Tree:
                 TableScan
@@ -4133,10 +3516,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -4155,10 +3534,6 @@ STAGE PLANS:
                       Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -4179,19 +3554,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 11 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -4243,13 +3607,6 @@ STAGE PLANS:
                           Target Vertex: Map 5
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -4281,13 +3638,6 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -4301,13 +3651,6 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 736 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 8 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -4389,15 +3732,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 2008-04-09
 2008-04-09
 PREHOOK: query: -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: -- single column, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4441,10 +3780,6 @@ STAGE PLANS:
                           value expressions: _col0 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -4480,23 +3815,8 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4550,17 +3870,13 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- multiple sources, single key
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) 
 where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -4613,10 +3929,6 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -4652,14 +3964,6 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -4695,23 +3999,8 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4767,15 +4056,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 500
 PREHOOK: query: -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
 PREHOOK: type: QUERY
 POSTHOOK: query: -- multiple columns single source
-EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from s

<TRUNCATED>

[56/62] hive git commit: HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index 8d738aa..63aa086 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -23,6 +23,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.adapter.druid.DruidQuery;
+import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelVisitor;
@@ -57,8 +59,9 @@ import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidQuery;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan;
@@ -346,6 +349,10 @@ public class ASTConverter {
       TableScan f = (TableScan) r;
       s = new Schema(f);
       ast = ASTBuilder.table(f);
+    } else if (r instanceof DruidQuery) {
+      DruidQuery f = (DruidQuery) r;
+      s = new Schema(f);
+      ast = ASTBuilder.table(f);
     } else if (r instanceof Join) {
       Join join = (Join) r;
       QueryBlockInfo left = convertSource(join.getLeft());
@@ -425,7 +432,8 @@ public class ASTConverter {
     @Override
     public void visit(RelNode node, int ordinal, RelNode parent) {
 
-      if (node instanceof TableScan) {
+      if (node instanceof TableScan ||
+          node instanceof DruidQuery) {
         ASTConverter.this.from = node;
       } else if (node instanceof Filter) {
         handle((Filter) node);
@@ -645,14 +653,30 @@ public class ASTConverter {
         astNodeLst.add(astBldr.node());
       }
 
-      for (RexNode operand : call.operands) {
-        astNodeLst.add(operand.accept(this));
+      if (op.kind == SqlKind.EXTRACT) {
+        // Extract on date: special handling since the function in Hive
+        // does not take <time_unit> as an argument. The <time_unit>
+        // information is implicit in the function name, so translation
+        // proceeds correctly if we simply ignore the <time_unit>
+        astNodeLst.add(call.operands.get(1).accept(this));
+      } else if (op.kind == SqlKind.FLOOR &&
+              call.operands.size() == 2) {
+        // Floor on date: special handling since the function in Hive
+        // does not take <time_unit> as an argument. The <time_unit>
+        // information is implicit in the function name, so translation
+        // proceeds correctly if we simply ignore the <time_unit>
+        astNodeLst.add(call.operands.get(0).accept(this));
+      } else {
+        for (RexNode operand : call.operands) {
+          astNodeLst.add(operand.accept(this));
+        }
       }
 
-      if (isFlat(call))
+      if (isFlat(call)) {
         return SqlFunctionConverter.buildAST(op, astNodeLst, 0);
-      else
+      } else {
         return SqlFunctionConverter.buildAST(op, astNodeLst);
+      }
     }
   }
 
@@ -675,18 +699,21 @@ public class ASTConverter {
     private static final long serialVersionUID = 1L;
 
     Schema(TableScan scan) {
-      HiveTableScan hts;
-      if (scan instanceof DruidQuery) {
-        hts = (HiveTableScan) ((DruidQuery)scan).getTableScan();
-      } else {
-        hts = (HiveTableScan) scan;
-      }
+      HiveTableScan hts = (HiveTableScan) scan;
       String tabName = hts.getTableAlias();
       for (RelDataTypeField field : scan.getRowType().getFieldList()) {
         add(new ColumnInfo(tabName, field.getName()));
       }
     }
 
+    Schema(DruidQuery dq) {
+      HiveTableScan hts = (HiveTableScan) dq.getTableScan();
+      String tabName = hts.getTableAlias();
+      for (RelDataTypeField field : dq.getRowType().getFieldList()) {
+        add(new ColumnInfo(tabName, field.getName()));
+      }
+    }
+
     Schema(Project select, String alias) {
       for (RelDataTypeField field : select.getRowType().getFieldList()) {
         add(new ColumnInfo(alias, field.getName()));

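For readers following the EXTRACT/FLOOR handling above, here is a minimal standalone sketch of the operand-selection rule (class and method names are hypothetical, not Hive's): Calcite models EXTRACT(YEAR FROM d) as a call with operands [unit flag, d] and two-operand FLOOR(d TO MONTH) as [d, unit flag], while the corresponding Hive functions, year(d) and floor_month(d), encode the unit in the function name, so only the date operand needs translating.

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch, not Hive code: mimics how the converter picks operands.
public class OperandSelectionSketch {

  // "kind" and "operands" stand in for RexCall.op.kind and RexCall.operands.
  static List<String> hiveOperands(String kind, List<String> operands) {
    if (kind.equals("EXTRACT")) {
      // Calcite EXTRACT carries [unit flag, date]; Hive's year(d)/month(d)/...
      // keep only the date, so operand 0 is dropped.
      return Arrays.asList(operands.get(1));
    }
    if (kind.equals("FLOOR") && operands.size() == 2) {
      // Two-operand FLOOR carries [date, unit flag]; floor_month(d)/... keep
      // only the date, so operand 1 is dropped.
      return Arrays.asList(operands.get(0));
    }
    return operands; // every other call translates operand-for-operand
  }

  public static void main(String[] args) {
    System.out.println(hiveOperands("EXTRACT", Arrays.asList("FLAG(YEAR)", "d"))); // [d]
    System.out.println(hiveOperands("FLOOR", Arrays.asList("d", "FLAG(MONTH)")));  // [d]
  }
}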
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index 46b936a..2d621e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -46,13 +46,11 @@ import org.apache.calcite.sql.type.SqlTypeUtil;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
@@ -75,7 +73,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -168,9 +165,23 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
     }
 
     List<ExprNodeDesc> args = new LinkedList<ExprNodeDesc>();
-
-    for (RexNode operand : call.operands) {
-      args.add(operand.accept(this));
+    if (call.getKind() == SqlKind.EXTRACT) {
+      // Extract on date: special handling since the function in Hive
+      // does not take <time_unit> as an argument. The <time_unit>
+      // information is implicit in the function name, so translation
+      // proceeds correctly if we simply ignore the <time_unit>
+      args.add(call.operands.get(1).accept(this));
+    } else if (call.getKind() == SqlKind.FLOOR &&
+            call.operands.size() == 2) {
+      // Floor on date: special handling since the function in Hive
+      // does not take <time_unit> as an argument. The <time_unit>
+      // information is implicit in the function name, so translation
+      // proceeds correctly if we simply ignore the <time_unit>
+      args.add(call.operands.get(0).accept(this));
+    } else {
+      for (RexNode operand : call.operands) {
+        args.add(operand.accept(this));
+      }
     }
 
     // If Call is a redundant cast then bail out. Ex: cast(true)BOOLEAN
@@ -239,9 +250,20 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       case VARCHAR:
       case CHAR:
         return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, null);
+      case INTERVAL_YEAR:
+      case INTERVAL_MONTH:
       case INTERVAL_YEAR_MONTH:
         return new ExprNodeConstantDesc(TypeInfoFactory.intervalYearMonthTypeInfo, null);
-      case INTERVAL_DAY_TIME:
+      case INTERVAL_DAY:
+      case INTERVAL_DAY_HOUR:
+      case INTERVAL_DAY_MINUTE:
+      case INTERVAL_DAY_SECOND:
+      case INTERVAL_HOUR:
+      case INTERVAL_HOUR_MINUTE:
+      case INTERVAL_HOUR_SECOND:
+      case INTERVAL_MINUTE:
+      case INTERVAL_MINUTE_SECOND:
+      case INTERVAL_SECOND:
         return new ExprNodeConstantDesc(TypeInfoFactory.intervalDayTimeTypeInfo, null);
       case OTHER:
       default:
@@ -291,12 +313,23 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       case CHAR: {
         return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
       }
+      case INTERVAL_YEAR:
+      case INTERVAL_MONTH:
       case INTERVAL_YEAR_MONTH: {
         BigDecimal monthsBd = (BigDecimal) literal.getValue();
         return new ExprNodeConstantDesc(TypeInfoFactory.intervalYearMonthTypeInfo,
                 new HiveIntervalYearMonth(monthsBd.intValue()));
       }
-      case INTERVAL_DAY_TIME: {
+      case INTERVAL_DAY:
+      case INTERVAL_DAY_HOUR:
+      case INTERVAL_DAY_MINUTE:
+      case INTERVAL_DAY_SECOND:
+      case INTERVAL_HOUR:
+      case INTERVAL_HOUR_MINUTE:
+      case INTERVAL_HOUR_SECOND:
+      case INTERVAL_MINUTE:
+      case INTERVAL_MINUTE_SECOND:
+      case INTERVAL_SECOND: {
         BigDecimal millisBd = (BigDecimal) literal.getValue();
         // Calcite literal is in millis, we need to convert to seconds
         BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));

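One detail worth calling out in the literal handling above: Calcite stores day-time interval literals as milliseconds, while Hive's HiveIntervalDayTime is built from seconds, hence the divide by 1000 before constructing the constant. A tiny self-contained sketch of just that conversion (the class name and values are made up for illustration):

import java.math.BigDecimal;

public class IntervalMillisSketch {
  public static void main(String[] args) {
    BigDecimal millisBd = BigDecimal.valueOf(90500); // 1 min 30.5 sec in millis
    // Dividing an integral number of millis by 1000 always terminates
    // (the denominator is 2^3 * 5^3), so this divide cannot throw.
    BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
    System.out.println(secsBd); // prints 90.5
  }
}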
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index 9db7727..acc2799 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.hep.HepRelVertex;
 import org.apache.calcite.plan.volcano.RelSubset;
@@ -111,6 +112,10 @@ public class PlanModifierForASTConv {
     if (rel instanceof HiveTableScan) {
       return ((HiveTableScan)rel).getTableAlias();
     }
+    if (rel instanceof DruidQuery) {
+      DruidQuery dq = (DruidQuery) rel;
+      return ((HiveTableScan) dq.getTableScan()).getTableAlias();
+    }
     if (rel instanceof Project) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 479070b..cb7bc26 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -29,6 +29,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.calcite.avatica.util.TimeUnit;
+import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.type.RelDataType;
@@ -55,6 +56,8 @@ import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -230,9 +233,15 @@ public class RexNodeConverter {
       retType = TypeConverter.convert(func.getTypeInfo(), cluster.getTypeFactory());
       SqlOperator calciteOp = SqlFunctionConverter.getCalciteOperator(func.getFuncText(),
           func.getGenericUDF(), argTypeBldr.build(), retType);
-      // If it is a case operator, we need to rewrite it
       if (calciteOp.getKind() == SqlKind.CASE) {
+        // If it is a case operator, we need to rewrite it
         childRexNodeLst = rewriteCaseChildren(func, childRexNodeLst);
+      } else if (HiveExtractDate.ALL_FUNCTIONS.contains(calciteOp)) {
+        // If it is an extract operator, we need to rewrite it
+        childRexNodeLst = rewriteExtractDateChildren(calciteOp, childRexNodeLst);
+      } else if (HiveFloorDate.ALL_FUNCTIONS.contains(calciteOp)) {
+        // If it is a floor <date> operator, we need to rewrite it
+        childRexNodeLst = rewriteFloorDateChildren(calciteOp, childRexNodeLst);
       }
       expr = cluster.getRexBuilder().makeCall(calciteOp, childRexNodeLst);
     } else {
@@ -340,6 +349,56 @@ public class RexNodeConverter {
     return newChildRexNodeLst;
   }
 
+  private List<RexNode> rewriteExtractDateChildren(SqlOperator op, List<RexNode> childRexNodeLst)
+      throws SemanticException {
+    List<RexNode> newChildRexNodeLst = new ArrayList<RexNode>();
+    if (op == HiveExtractDate.YEAR) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.YEAR));
+    } else if (op == HiveExtractDate.QUARTER) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.QUARTER));
+    } else if (op == HiveExtractDate.MONTH) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.MONTH));
+    } else if (op == HiveExtractDate.WEEK) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.WEEK));
+    } else if (op == HiveExtractDate.DAY) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.DAY));
+    } else if (op == HiveExtractDate.HOUR) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.HOUR));
+    } else if (op == HiveExtractDate.MINUTE) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.MINUTE));
+    } else if (op == HiveExtractDate.SECOND) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.SECOND));
+    }
+    assert childRexNodeLst.size() == 1;
+    newChildRexNodeLst.add(childRexNodeLst.get(0));
+    return newChildRexNodeLst;
+  }
+
+  private List<RexNode> rewriteFloorDateChildren(SqlOperator op, List<RexNode> childRexNodeLst)
+      throws SemanticException {
+    List<RexNode> newChildRexNodeLst = new ArrayList<RexNode>();
+    assert childRexNodeLst.size() == 1;
+    newChildRexNodeLst.add(childRexNodeLst.get(0));
+    if (op == HiveFloorDate.YEAR) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.YEAR));
+    } else if (op == HiveFloorDate.QUARTER) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.QUARTER));
+    } else if (op == HiveFloorDate.MONTH) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.MONTH));
+    } else if (op == HiveFloorDate.WEEK) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.WEEK));
+    } else if (op == HiveFloorDate.DAY) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.DAY));
+    } else if (op == HiveFloorDate.HOUR) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.HOUR));
+    } else if (op == HiveFloorDate.MINUTE) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.MINUTE));
+    } else if (op == HiveFloorDate.SECOND) {
+      newChildRexNodeLst.add(cluster.getRexBuilder().makeFlag(TimeUnitRange.SECOND));
+    }
+    return newChildRexNodeLst;
+  }
+
   private static boolean checkForStatefulFunctions(List<ExprNodeDesc> list) {
     for (ExprNodeDesc node : list) {
       if (node instanceof ExprNodeGenericFuncDesc) {

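A hedged sketch of the operand-order asymmetry between the two rewrite helpers above (plain strings stand in for RexNode values and TimeUnitRange flags; the method names mirror the diff but the class is hypothetical): rewriteExtractDateChildren prepends the unit flag because Calcite's EXTRACT takes (flag, date), while rewriteFloorDateChildren appends it because date FLOOR takes (date, flag).

import java.util.ArrayList;
import java.util.List;

public class DateRewriteSketch {

  static List<String> rewriteExtractChildren(String unitFlag, List<String> children) {
    List<String> out = new ArrayList<>();
    out.add(unitFlag);        // EXTRACT expects the unit flag before the date
    out.addAll(children);
    return out;
  }

  static List<String> rewriteFloorChildren(String unitFlag, List<String> children) {
    List<String> out = new ArrayList<>(children);
    out.add(unitFlag);        // date FLOOR expects the unit flag after the date
    return out;
  }

  public static void main(String[] args) {
    System.out.println(rewriteExtractChildren("FLAG(YEAR)", List.of("d")));  // [FLAG(YEAR), d]
    System.out.println(rewriteFloorChildren("FLAG(MONTH)", List.of("d")));   // [d, FLAG(MONTH)]
  }
}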
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index f150132..cb86934 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -48,7 +48,8 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlCountAggFunc
 import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlMinMaxAggFunction;
 import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlSumAggFunction;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateGranularity;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -216,6 +217,8 @@ public class SqlFunctionConverter {
         case IS_NOT_NULL:
         case IS_NULL:
         case CASE:
+        case EXTRACT:
+        case FLOOR:
         case OTHER_FUNCTION:
           node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
           node.addChild((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
@@ -346,21 +349,37 @@ public class SqlFunctionConverter {
       registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       registerDuplicateFunction("case", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       // timebased
-      registerFunction("floor_year", HiveDateGranularity.YEAR,
+      registerFunction("year", HiveExtractDate.YEAR,
+          hToken(HiveParser.Identifier, "year"));
+      registerFunction("quarter", HiveExtractDate.QUARTER,
+          hToken(HiveParser.Identifier, "quarter"));
+      registerFunction("month", HiveExtractDate.MONTH,
+          hToken(HiveParser.Identifier, "month"));
+      registerFunction("weekofyear", HiveExtractDate.WEEK,
+          hToken(HiveParser.Identifier, "weekofyear"));
+      registerFunction("day", HiveExtractDate.DAY,
+          hToken(HiveParser.Identifier, "day"));
+      registerFunction("hour", HiveExtractDate.HOUR,
+          hToken(HiveParser.Identifier, "hour"));
+      registerFunction("minute", HiveExtractDate.MINUTE,
+          hToken(HiveParser.Identifier, "minute"));
+      registerFunction("second", HiveExtractDate.SECOND,
+          hToken(HiveParser.Identifier, "second"));
+      registerFunction("floor_year", HiveFloorDate.YEAR,
           hToken(HiveParser.Identifier, "floor_year"));
-      registerFunction("floor_quarter", HiveDateGranularity.QUARTER,
+      registerFunction("floor_quarter", HiveFloorDate.QUARTER,
           hToken(HiveParser.Identifier, "floor_quarter"));
-      registerFunction("floor_month", HiveDateGranularity.MONTH,
+      registerFunction("floor_month", HiveFloorDate.MONTH,
           hToken(HiveParser.Identifier, "floor_month"));
-      registerFunction("floor_week", HiveDateGranularity.WEEK,
+      registerFunction("floor_week", HiveFloorDate.WEEK,
           hToken(HiveParser.Identifier, "floor_week"));
-      registerFunction("floor_day", HiveDateGranularity.DAY,
+      registerFunction("floor_day", HiveFloorDate.DAY,
           hToken(HiveParser.Identifier, "floor_day"));
-      registerFunction("floor_hour", HiveDateGranularity.HOUR,
+      registerFunction("floor_hour", HiveFloorDate.HOUR,
           hToken(HiveParser.Identifier, "floor_hour"));
-      registerFunction("floor_minute", HiveDateGranularity.MINUTE,
+      registerFunction("floor_minute", HiveFloorDate.MINUTE,
           hToken(HiveParser.Identifier, "floor_minute"));
-      registerFunction("floor_second", HiveDateGranularity.SECOND,
+      registerFunction("floor_second", HiveFloorDate.SECOND,
           hToken(HiveParser.Identifier, "floor_second"));
     }
 

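To make the effect of the re-registration above concrete, here is an illustrative stand-in, a plain map rather than the converter's real lookup tables: each extract-style Hive function now resolves to a HiveExtractDate operator and each floor_* function to a HiveFloorDate operator, replacing the old HiveDateGranularity family.

import java.util.Map;

public class RegistrationSketch {
  // Keys are real Hive function names; values name the operator each one
  // maps to after this change (shown as strings purely for illustration).
  static final Map<String, String> FUNCTIONS = Map.of(
      "year", "HiveExtractDate.YEAR",
      "weekofyear", "HiveExtractDate.WEEK",
      "second", "HiveExtractDate.SECOND",
      "floor_year", "HiveFloorDate.YEAR",
      "floor_week", "HiveFloorDate.WEEK",
      "floor_second", "HiveFloorDate.SECOND");

  public static void main(String[] args) {
    FUNCTIONS.forEach((hive, calcite) -> System.out.println(hive + " -> " + calcite));
  }
}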
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index ba41518..a47010d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -75,8 +75,32 @@ public class TypeConverter {
     b.put(SqlTypeName.DOUBLE.getName(), new HiveToken(HiveParser.TOK_DOUBLE, "TOK_DOUBLE"));
     b.put(SqlTypeName.DATE.getName(), new HiveToken(HiveParser.TOK_DATE, "TOK_DATE"));
     b.put(SqlTypeName.TIMESTAMP.getName(), new HiveToken(HiveParser.TOK_TIMESTAMP, "TOK_TIMESTAMP"));
-    b.put(SqlTypeName.INTERVAL_YEAR_MONTH.getName(), new HiveToken(HiveParser.TOK_INTERVAL_YEAR_MONTH, "TOK_INTERVAL_YEAR_MONTH"));
-    b.put(SqlTypeName.INTERVAL_DAY_TIME.getName(), new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME, "TOK_INTERVAL_DAY_TIME"));
+    b.put(SqlTypeName.INTERVAL_YEAR.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL, "TOK_INTERVAL_YEAR_MONTH_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_MONTH.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL, "TOK_INTERVAL_YEAR_MONTH_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_YEAR_MONTH.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL, "TOK_INTERVAL_YEAR_MONTH_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_DAY.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_DAY_HOUR.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_DAY_MINUTE.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_DAY_SECOND.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_HOUR.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_HOUR_MINUTE.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_HOUR_SECOND.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_MINUTE.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_MINUTE_SECOND.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
+    b.put(SqlTypeName.INTERVAL_SECOND.getName(),
+            new HiveToken(HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL, "TOK_INTERVAL_DAY_TIME_LITERAL"));
     b.put(SqlTypeName.BINARY.getName(), new HiveToken(HiveParser.TOK_BINARY, "TOK_BINARY"));
     calciteToHiveTypeNameMap = b.build();
   };
@@ -301,9 +325,20 @@ public class TypeConverter {
       return TypeInfoFactory.dateTypeInfo;
     case TIMESTAMP:
       return TypeInfoFactory.timestampTypeInfo;
+    case INTERVAL_YEAR:
+    case INTERVAL_MONTH:
     case INTERVAL_YEAR_MONTH:
       return TypeInfoFactory.intervalYearMonthTypeInfo;
-    case INTERVAL_DAY_TIME:
+    case INTERVAL_DAY:
+    case INTERVAL_DAY_HOUR:
+    case INTERVAL_DAY_MINUTE:
+    case INTERVAL_DAY_SECOND:
+    case INTERVAL_HOUR:
+    case INTERVAL_HOUR_MINUTE:
+    case INTERVAL_HOUR_SECOND:
+    case INTERVAL_MINUTE:
+    case INTERVAL_MINUTE_SECOND:
+    case INTERVAL_SECOND:
       return TypeInfoFactory.intervalDayTimeTypeInfo;
     case BINARY:
       return TypeInfoFactory.binaryTypeInfo;

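The net effect of the two hunks above is a many-to-two collapse: every Calcite interval variant lands on one of Hive's two interval types, interval_year_month or interval_day_time. A self-contained sketch of that mapping (illustrative enum and method names, not Hive's API):

public class IntervalMappingSketch {
  enum HiveIntervalKind { YEAR_MONTH, DAY_TIME }

  static HiveIntervalKind toHiveInterval(String sqlTypeName) {
    switch (sqlTypeName) {
      // Anything expressible in whole months keeps month resolution.
      case "INTERVAL_YEAR":
      case "INTERVAL_MONTH":
      case "INTERVAL_YEAR_MONTH":
        return HiveIntervalKind.YEAR_MONTH;
      // Everything finer-grained is stored as day-time.
      case "INTERVAL_DAY":
      case "INTERVAL_DAY_HOUR":
      case "INTERVAL_DAY_MINUTE":
      case "INTERVAL_DAY_SECOND":
      case "INTERVAL_HOUR":
      case "INTERVAL_HOUR_MINUTE":
      case "INTERVAL_HOUR_SECOND":
      case "INTERVAL_MINUTE":
      case "INTERVAL_MINUTE_SECOND":
      case "INTERVAL_SECOND":
        return HiveIntervalKind.DAY_TIME;
      default:
        throw new IllegalArgumentException("Not an interval type: " + sqlTypeName);
    }
  }

  public static void main(String[] args) {
    System.out.println(toHiveInterval("INTERVAL_HOUR_MINUTE")); // DAY_TIME
  }
}
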
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index cf66ad9..d32a0a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -41,6 +41,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.tree.TreeVisitor;
 import org.antlr.runtime.tree.TreeVisitorAction;
+import org.apache.calcite.adapter.druid.DruidQuery;
+import org.apache.calcite.adapter.druid.DruidRules;
+import org.apache.calcite.adapter.druid.DruidSchema;
+import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptPlanner.Executor;
@@ -65,7 +69,9 @@ import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.rules.FilterMergeRule;
 import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
 import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
@@ -78,6 +84,7 @@ import org.apache.calcite.rel.rules.UnionMergeRule;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeImpl;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexFieldCollation;
 import org.apache.calcite.rex.RexInputRef;
@@ -132,11 +139,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidQuery;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidRules;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidSchema;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.druid.HiveDruidConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
@@ -974,11 +976,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
       final Double maxMemory = (double) HiveConf.getLongVar(
               conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
       HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
-      final int selectThreshold = (int) HiveConf.getIntVar(
-              conf, HiveConf.ConfVars.HIVE_DRUID_SELECT_THRESHOLD);
-      HiveDruidConf druidConf = new HiveDruidConf(selectThreshold);
       HiveRulesRegistry registry = new HiveRulesRegistry();
-      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, druidConf, registry);
+      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry);
       RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
       final RelOptQuery query = new RelOptQuery(planner);
       final RexBuilder rexBuilder = cluster.getRexBuilder();
@@ -1008,8 +1007,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
           this.viewProjectToTableSchema);
       fieldTrimmer.trim(calciteGenPlan);
 
-      // Create MD provider
+      // Create and set MD provider
       HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf);
+      RelMetadataQuery.THREAD_PROVIDERS.set(
+              JaninoRelMetadataProvider.of(mdProvider.getMetadataProvider()));
 
       // Create executor
       Executor executorProvider = new HiveRexExecutorImpl(cluster);
@@ -1031,8 +1032,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
 
           HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
           hepPgmBldr.addRuleInstance(new JoinToMultiJoinRule(HiveJoin.class));
-          hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveRelFactories.HIVE_JOIN_FACTORY,
-              HiveRelFactories.HIVE_PROJECT_FACTORY, HiveRelFactories.HIVE_FILTER_FACTORY));
+          hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveRelFactories.HIVE_BUILDER));
 
           HepProgram hepPgm = hepPgmBldr.build();
           HepPlanner hepPlanner = new HepPlanner(hepPgm);
@@ -1140,10 +1140,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
         calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan);
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
                 HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE,
-                new ProjectMergeRule(false, HiveRelFactories.HIVE_PROJECT_FACTORY));
+                new ProjectMergeRule(false, HiveRelFactories.HIVE_BUILDER));
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, true, mdProvider.getMetadataProvider(), null,
-            new HiveFilterProjectTSTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY,
-                    HiveProject.class, HiveRelFactories.HIVE_PROJECT_FACTORY, HiveTableScan.class));
+                HiveFilterProjectTSTransposeRule.INSTANCE, HiveFilterProjectTSTransposeRule.INSTANCE_DRUID,
+                HiveProjectFilterPullUpConstantsRule.INSTANCE);
 
         // 9.2.  Introduce exchange operators below join/multijoin operators
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
@@ -1222,7 +1222,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       rules.add(HiveFilterJoinRule.JOIN);
       rules.add(HiveFilterJoinRule.FILTER_ON_JOIN);
       rules.add(new HiveFilterAggregateTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, Aggregate.class));
-      rules.add(new FilterMergeRule(HiveRelFactories.HIVE_FILTER_FACTORY));
+      rules.add(new FilterMergeRule(HiveRelFactories.HIVE_BUILDER));
       if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_REDUCE_WITH_STATS)) {
         rules.add(HiveReduceExpressionsWithStatsRule.INSTANCE);
       }
@@ -1303,9 +1303,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // matches FIL-PROJ-TS
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = hepPlan(basePlan, true, mdProvider, null,
-          new HiveFilterProjectTSTransposeRule(
-              Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
-              HiveRelFactories.HIVE_PROJECT_FACTORY, TableScan.class),
+          HiveFilterProjectTSTransposeRule.INSTANCE, HiveFilterProjectTSTransposeRule.INSTANCE_DRUID,
           HiveProjectFilterPullUpConstantsRule.INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Rerun PPD");
@@ -1822,8 +1820,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
           }
           List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
 
-          DruidTable druidTable = new DruidTable(new DruidSchema(address),
-                  dataSource, rowType, metrics, intervals, DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+          DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
+                  dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
           final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
                   optTable, null == tableAlias ? tabMetaData.getTableName() : tableAlias,
                   getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,

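One change worth calling out from the hunks above: with newer Calcite, creating the metadata provider is no longer enough; it must also be installed into RelMetadataQuery's per-thread cache of Janino-generated handlers, otherwise Calcite falls back to its default provider. A minimal sketch of that setup, substituting Calcite's stock provider where Hive passes HiveDefaultRelMetadataProvider:

import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

public class MetadataProviderSetupSketch {
  public static void main(String[] args) {
    // Any RelMetadataProvider works here; Hive supplies its own provider instead.
    RelMetadataProvider provider = DefaultRelMetadataProvider.INSTANCE;
    // Install the code-generating wrapper for this thread before creating any
    // RelMetadataQuery; queries created afterwards use the compiled handlers.
    RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(provider));
    RelMetadataQuery mq = RelMetadataQuery.instance();
    System.out.println("metadata query ready: " + (mq != null));
  }
}
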
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 82080eb..5e708d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -73,8 +73,8 @@ import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBOMaxNumToCNF.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBOMaxNumToCNF.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBOMaxNumToCNF.java
index 277ac1e..cf72b7c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBOMaxNumToCNF.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBOMaxNumToCNF.java
@@ -23,6 +23,7 @@ import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.junit.Test;
@@ -49,7 +50,7 @@ public class TestCBOMaxNumToCNF {
                     rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                             rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
                             rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
-    final RexNode newCond = HiveRexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
+    final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
 
     assertEquals(newCond.toString(), "AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))");
   }
@@ -75,7 +76,7 @@ public class TestCBOMaxNumToCNF {
                     rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                             rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
                             rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
-    final RexNode newCond = HiveRexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
+    final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
 
     assertEquals(newCond.toString(), "OR(=($0, 1), =($0, 2), AND(=($0, 0), =($1, 8)))");
   }

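For reference, the upstream replacement exercised by this test is Calcite's RexUtil.toCnf(RexBuilder, int, RexNode): Hive's HiveRexUtil.toCnf was contributed upstream, and the int argument caps the size of the CNF expansion (if the rewritten expression would exceed the cap, the original expression is returned unchanged, which is what the second test case above checks). A condensed, runnable version of the first test case:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class ToCnfSketch {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    RexBuilder rexBuilder = new RexBuilder(typeFactory);
    RexNode a = rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0);
    RexNode b = rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1);
    // (a = 1) OR ((a = 0) AND (b = 8)) -- same shape as the test condition
    RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.OR,
        rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, a,
            rexBuilder.makeLiteral(1, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
        rexBuilder.makeCall(SqlStdOperatorTable.AND,
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, a,
                rexBuilder.makeLiteral(0, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, b,
                rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
    // With a generous node cap, the OR distributes over the AND:
    System.out.println(RexUtil.toCnf(rexBuilder, 100, cond));
    // AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))
  }
}
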
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
index 2830f1f..44e157b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
@@ -61,7 +61,7 @@ public class TestCBORuleFiredOnlyOnce {
 
     // Create rules registry to not trigger a rule more than once
     HiveRulesRegistry registry = new HiveRulesRegistry();
-    HivePlannerContext context = new HivePlannerContext(null, null, registry);
+    HivePlannerContext context = new HivePlannerContext(null, registry);
     HepPlanner planner = new HepPlanner(programBuilder.build(), context);
 
     // Cluster

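For context, the registry mentioned in the comment above exists so that a rule fires at most once on a given node. A minimal sketch of that bookkeeping pattern (illustrative, not Hive's HiveRulesRegistry implementation):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class OnceOnlyRegistrySketch {
  // Maps a rule name to the set of node ids it has already fired on.
  private final Map<String, Set<Integer>> fired = new HashMap<>();

  // Returns true only the first time a (rule, node) pair is seen.
  boolean shouldFire(String ruleName, int nodeId) {
    return fired.computeIfAbsent(ruleName, k -> new HashSet<>()).add(nodeId);
  }

  public static void main(String[] args) {
    OnceOnlyRegistrySketch registry = new OnceOnlyRegistrySketch();
    System.out.println(registry.shouldFire("MyRule", 42)); // true  -> transform
    System.out.println(registry.shouldFire("MyRule", 42)); // false -> skip
  }
}
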
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 3205905..858f550 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -108,8 +108,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":["delta"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["delta"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -139,8 +139,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -170,8 +170,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"ALL","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type groupBy
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type GROUP_BY
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -218,8 +218,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: language is not null (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Filter Operator
@@ -243,8 +243,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: language is not null (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Filter Operator
@@ -279,8 +279,8 @@ STAGE PLANS:
               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
               columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
               druid.datasource wikipedia
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
 #### A masked pattern was here ####
               name default.druid_table_1
               numFiles 0
@@ -304,8 +304,8 @@ STAGE PLANS:
                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                 columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
                 druid.datasource wikipedia
-                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-                druid.query.type select
+                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+                druid.query.type SELECT
 #### A masked pattern was here ####
                 name default.druid_table_1
                 numFiles 0
@@ -403,8 +403,8 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
             GatherStats: false
             Select Operator
@@ -418,8 +418,8 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Reduce Output Operator
@@ -445,8 +445,8 @@ STAGE PLANS:
               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
               columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
               druid.datasource wikipedia
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
 #### A masked pattern was here ####
               name default.druid_table_1
               numFiles 0
@@ -470,8 +470,8 @@ STAGE PLANS:
                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                 columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
                 druid.datasource wikipedia
-                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-                druid.query.type select
+                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+                druid.query.type SELECT
 #### A masked pattern was here ####
                 name default.druid_table_1
                 numFiles 0

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index 984bb79..0cb373b 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -81,8 +81,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -113,8 +113,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/2012-03-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/2012-03-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -145,8 +145,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2012-03-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2012-03-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -179,8 +179,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -211,8 +211,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -245,8 +245,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00","2012-01-01T00:00:00.000-08:00/2013-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z","2012-01-01T00:00:00.000Z/2013-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -279,8 +279,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2012-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2012-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -311,8 +311,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2010-01-01T00:00:00.001-08:00","2011-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z","2011-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp)
@@ -341,8 +341,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2010-01-01T00:00:00.001-08:00","2011-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z","2011-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+            druid.query.type SELECT
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), 'user1' (type: string)
@@ -372,8 +372,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)

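A note on the interval literals above: Druid intervals are closed-open, so a point predicate on __time (such as each member of the IN list in the last query) is encoded as an interval exactly one millisecond wide, and an inclusive upper bound is widened by one millisecond. A small sketch of that encoding in plain java.time (Hive builds these strings internally; the names here are illustrative):

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class DruidIntervalSketch {
  private static final DateTimeFormatter ISO_MILLIS =
      DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").withZone(ZoneOffset.UTC);

  // __time = t becomes the closed-open interval [t, t + 1ms).
  static String pointInterval(Instant t) {
    return ISO_MILLIS.format(t) + "/" + ISO_MILLIS.format(t.plusMillis(1));
  }

  public static void main(String[] args) {
    System.out.println(pointInterval(Instant.parse("2010-01-01T00:00:00Z")));
    // -> 2010-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z
  }
}
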
http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index 8d974a4..3708fba 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -81,8 +81,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"ALL","aggregations":[{"type":"longMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"longMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint), $f1 (type: float)
@@ -113,8 +113,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"NONE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"NONE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -145,8 +145,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"YEAR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"YEAR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -177,8 +177,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"QUARTER","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"QUARTER","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -209,8 +209,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"MONTH","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"MONTH","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -241,8 +241,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"WEEK","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"WEEK","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -273,8 +273,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"DAY","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"DAY","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -305,8 +305,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"HOUR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"HOUR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -337,8 +337,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"MINUTE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"MINUTE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -369,8 +369,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"SECOND","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"SECOND","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -403,8 +403,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"HOUR","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"HOUR","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type TIMESERIES
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
@@ -442,8 +442,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
@@ -521,8 +521,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/b597ab2a/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 17bdaed..51f1fb5 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -87,8 +87,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"ALL","dimension":"robot","metric":"$f1","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":100}
-            druid.query.type topN
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"all","dimension":"robot","metric":"$f1","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"threshold":100}
+            druid.query.type TOP_N
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), $f1 (type: bigint), $f2 (type: float)
@@ -123,8 +123,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"NONE","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":100}
-            druid.query.type topN
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"NONE","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"threshold":100}
+            druid.query.type TOP_N
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
@@ -159,8 +159,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"YEAR","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":10}
-            druid.query.type topN
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"YEAR","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"threshold":10}
+            druid.query.type TOP_N
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
@@ -195,8 +195,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending"}]},"aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type groupBy
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending"}]},"aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type GROUP_BY
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
@@ -231,8 +231,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type groupBy
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type GROUP_BY
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
@@ -267,8 +267,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type groupBy
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type GROUP_BY
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
@@ -305,8 +305,8 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"YEAR","dimensions":[],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f2","direction":"ascending"}]},"filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1_0","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
-            druid.query.type groupBy
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"YEAR","dimensions":[],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f2","direction":"ascending"}]},"filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1_0","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type GROUP_BY
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: '1' (type: string), __time (type: timestamp), $f1_0 (type: bigint), $f2 (type: float)
@@ -349,8 +349,8 @@ STAGE PLANS:
             alias: druid_table_1
             filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+              druid.query.type SELECT
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)

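The topN and groupBy hunks in druid_topn.q.out follow the same pattern: the query bodies are unchanged while the intervals move to UTC and druid.query.type becomes TOP_N or GROUP_BY. The first topN above, with metric $f1 = longMax(added) and threshold 100, corresponds to a grouped top-100 of roughly this form (a sketch; the aggregates and limit are read off the JSON, the exact source query is assumed):

    SELECT robot, max(added) AS m, sum(variation)
    FROM druid_table_1
    GROUP BY robot
    ORDER BY m DESC
    LIMIT 100;
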

[21/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
index 1fde0a9..a075662 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
@@ -4,7 +4,7 @@ PREHOOK: query: -- SORT_QUERY_RESULTS
 -- Query copied from subquery_in.q
 
 -- non agg, non corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
@@ -16,16 +16,12 @@ POSTHOOK: query: -- SORT_QUERY_RESULTS
 -- Query copied from subquery_in.q
 
 -- non agg, non corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR')
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -69,10 +65,6 @@ STAGE PLANS:
                           value expressions: _col2 (type: int)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 2 
             Map Operator Tree:
                 TableScan
@@ -97,10 +89,6 @@ STAGE PLANS:
                           Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -121,27 +109,10 @@ STAGE PLANS:
                         Statistics: Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
@@ -152,10 +123,6 @@ STAGE PLANS:
                   keys:
                     0 _col1 (type: int)
                     1 _col0 (type: int)
-                  Map Join Vectorization:
-                      className: VectorMapJoinInnerLongOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                   outputColumnNames: _col2, _col4
                   input vertices:
                     0 Map 1
@@ -163,16 +130,9 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col4 (type: int), _col2 (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -210,23 +170,19 @@ POSTHOOK: Input: default@lineitem
 64128	9141
 82704	7721
 PREHOOK: query: -- non agg, corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
 PREHOOK: type: QUERY
 POSTHOOK: query: -- non agg, corr, with join in Parent Query
-explain vectorization expression
+explain
 select p.p_partkey, li.l_suppkey 
 from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
 where li.l_linenumber = 1 and
  li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -270,10 +226,6 @@ STAGE PLANS:
                           value expressions: _col2 (type: int)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 2 
             Map Operator Tree:
                 TableScan
@@ -298,10 +250,6 @@ STAGE PLANS:
                           Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -322,27 +270,10 @@ STAGE PLANS:
                         Statistics: Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
@@ -353,10 +284,6 @@ STAGE PLANS:
                   keys:
                     0 _col1 (type: int)
                     1 _col0 (type: int)
-                  Map Join Vectorization:
-                      className: VectorMapJoinInnerLongOperator
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                   outputColumnNames: _col2, _col4
                   input vertices:
                     0 Map 1
@@ -364,16 +291,9 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col4 (type: int), _col2 (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat

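The vector_mapjoin_reduce.q.out hunks show the full shape of what HIVE-11394 added and this commit reverts: an EXPLAIN VECTORIZATION EXPRESSION variant, a PLAN VECTORIZATION header, per-vertex Map/Reduce Vectorization summaries, and per-operator blocks such as Map Join Vectorization with their native-execution conditions. After the revert the test issues a plain EXPLAIN; reformatted from the hunk above:

    EXPLAIN
    SELECT p.p_partkey, li.l_suppkey
    FROM (SELECT DISTINCT l_partkey AS p_partkey FROM lineitem) p
    JOIN lineitem li ON p.p_partkey = li.l_partkey
    WHERE li.l_linenumber = 1
      AND li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR');
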
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
index d7ebd2b..b6a3b9a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
@@ -211,7 +211,7 @@ POSTHOOK: Output: default@store
 PREHOOK: query: -- For MR, we are verifying this query DOES NOT vectorize the Map vertex with
 -- the 2 TableScanOperators that have different schema.
 
-explain vectorization select
+explain select
         s_state, count(1)
  from store_sales,
  store,
@@ -226,7 +226,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- For MR, we are verifying this query DOES NOT vectorize the Map vertex with
 -- the 2 TableScanOperators that have different schema.
 
-explain vectorization select
+explain select
         s_state, count(1)
  from store_sales,
  store,
@@ -238,10 +238,6 @@ explain vectorization select
  order by s_state
  limit 100
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -277,12 +273,6 @@ STAGE PLANS:
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: unknown
-            Map Vectorization:
-                enabled: true
-                groupByVectorOutput: true
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -303,14 +293,6 @@ STAGE PLANS:
                         value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -330,14 +312,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
@@ -385,13 +359,6 @@ STAGE PLANS:
                       value expressions: _col1 (type: bigint)
         Reducer 4 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -407,13 +374,6 @@ STAGE PLANS:
                   value expressions: _col1 (type: bigint)
         Reducer 5 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint)

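The stated purpose of vector_mr_diff_schema_alias.q survives the revert: it verifies that, on MR, a Map vertex holding two TableScanOperators with different schemas does not vectorize, but the expected output now carries no explicit Map Vectorization report, so the reader must rely on the per-vertex Execution mode lines. The join predicates fall between the hunks above, so the TPC-DS-style keys in this sketch of the query under test are assumed:

    EXPLAIN
    SELECT s_state, count(1)
    FROM store_sales, store, date_dim
    WHERE ss_store_sk = s_store_sk
      AND ss_sold_date_sk = d_date_sk
    GROUP BY s_state
    ORDER BY s_state
    LIMIT 100;
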
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
index d537297..469c702 100644
--- a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
@@ -65,20 +65,16 @@ POSTHOOK: query: analyze table orc1 compute statistics
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc1
 POSTHOOK: Output: default@orc1
-PREHOOK: query: explain vectorization from orc1 a
+PREHOOK: query: explain from orc1 a
 insert overwrite table orc_rn1 select a.* where a.rn < 100
 insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000
 insert overwrite table orc_rn3 select a.* where a.rn >= 1000
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization from orc1 a
+POSTHOOK: query: explain from orc1 a
 insert overwrite table orc_rn1 select a.* where a.rn < 100
 insert overwrite table orc_rn2 select a.* where a.rn >= 100 and a.rn < 1000
 insert overwrite table orc_rn3 select a.* where a.rn >= 1000
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
   Stage-4 depends on stages: Stage-3
@@ -146,14 +142,6 @@ STAGE PLANS:
                             name: default.orc_rn3
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
 
   Stage: Stage-4
     Dependency Collection

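vector_multi_insert.q exercises Hive's multi-insert form, in which a single scan of orc1 feeds three independent filters and file sinks; the removed Map Vectorization block had summarized that vertex as vectorized but not all-native (allNative: false). The multi-insert shape, as in the hunk above:

    FROM orc1 a
    INSERT OVERWRITE TABLE orc_rn1 SELECT a.* WHERE a.rn < 100
    INSERT OVERWRITE TABLE orc_rn2 SELECT a.* WHERE a.rn >= 100 AND a.rn < 1000
    INSERT OVERWRITE TABLE orc_rn3 SELECT a.* WHERE a.rn >= 1000;
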
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
index 45520e2..4bfe41a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
@@ -37,17 +37,13 @@ POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@b
 POSTHOOK: Lineage: b.s SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 PREHOOK: query: -- We expect no vectorization due to NULL (void) projection type.
-explain vectorization expression
+explain
 select NULL from a
 PREHOOK: type: QUERY
 POSTHOOK: query: -- We expect no vectorization due to NULL (void) projection type.
-explain vectorization expression
+explain
 select NULL from a
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -75,12 +71,6 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type void of Const void null not supported
-                vectorized: false
 
   Stage: Stage-0
     Fetch Operator
@@ -97,16 +87,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@a
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select NULL as x from a union distinct select NULL as x from b
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select NULL as x from a union distinct select NULL as x from b
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -142,12 +128,6 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Key expression for GROUPBY operator: Data type void of Const void null not supported
-                vectorized: false
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -169,19 +149,8 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Key expression for GROUPBY operator: Data type void of Const void null not supported
-                vectorized: false
         Reducer 3 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Key expression for GROUPBY operator: Data type void of Column[KEY._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: void)

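In vector_null_projection.q.out the removed annotations carried genuine diagnostic value: notVectorizedReason spelled out that a bare NULL projects the void type, which neither the SELECT nor the GROUPBY vectorizer supports, whereas plain EXPLAIN leaves that reasoning invisible. Outside this test, giving the literal a concrete type is the usual way to avoid the void projection; a sketch, not part of the test:

    SELECT cast(NULL AS int) FROM a;
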

[09/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
index a7c0d10..1bab6f7 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
@@ -1,19 +1,15 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-EXPLAIN VECTORIZATION EXPRESSION  SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
   FROM alltypesorc t1
   JOIN alltypesorc t2 ON t1.cint = t2.cint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -32,23 +28,12 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: cint is not null (type: boolean)
                     Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
                       Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                       Map Join Operator
                         condition map:
@@ -56,10 +41,6 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerBigOnlyLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 3
@@ -67,21 +48,9 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: int), _col1 (type: int), (_col0 + _col1) (type: int)
                           outputColumnNames: _col0, _col1, _col2
-                          Select Vectorization:
-                              className: VectorSelectOperator
-                              native: true
-                              projectedOutputColumns: [2, 2, 12]
-                              selectExpressions: LongColAddLongColumn(col 2, col 2) -> 12:long
                           Statistics: Num rows: 19518 Data size: 156144 Basic stats: COMPLETE Column stats: COMPLETE
                           Group By Operator
                             aggregations: count(_col0), max(_col1), min(_col0), avg(_col2)
-                            Group By Vectorization:
-                                aggregators: VectorUDAFCount(col 2) -> bigint, VectorUDAFMaxLong(col 2) -> int, VectorUDAFMinLong(col 2) -> int, VectorUDAFAvgLong(col 12) -> struct<count:bigint,sum:double>
-                                className: VectorGroupByOperator
-                                vectorOutput: false
-                                native: false
-                                projectedOutputColumns: [0, 1, 2, 3]
-                                vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(col 12) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false
                             mode: hash
                             outputColumnNames: _col0, _col1, _col2, _col3
                             Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE
@@ -91,63 +60,27 @@ STAGE PLANS:
                               value expressions: _col0 (type: bigint), _col1 (type: int), _col2 (type: int), _col3 (type: struct<count:bigint,sum:double,input:int>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 2) -> boolean
                     predicate: cint is not null (type: boolean)
                     Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2]
                       Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col3] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), avg(VALUE._col3)

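The removed blocks in vectorized_mapjoin.q.out documented a real limitation: avg() is carried between stages as a struct<count:bigint,sum:double,input:int>, which VectorGroupByOperator could not emit (vectorOutput: false) and which the Reduce Vectorization summary named as the reason Reducer 2 stayed row-mode. A manual rewrite that keeps every intermediate type primitive is sketched below; it is equivalent because avg(x) = sum(x) / count(x) over the non-null inputs, but it is not what the test runs:

    SELECT count(t1.cint), max(t2.cint), min(t1.cint),
           sum(t1.cint + t2.cint) / count(t1.cint + t2.cint)
    FROM alltypesorc t1
    JOIN alltypesorc t2 ON t1.cint = t2.cint;
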
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
index dbc2a78..0a81f62 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
@@ -1,6 +1,6 @@
 PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression
+explain 
 select
    cdouble
   ,Round(cdouble, 2)
@@ -54,7 +54,7 @@ and sin(cfloat) >= -1.0
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
-explain vectorization expression
+explain 
 select
    cdouble
   ,Round(cdouble, 2)
@@ -106,69 +106,22 @@ where cbigint % 500 = 0
 -- test use of a math function in the WHERE clause
 and sin(cfloat) >= -1.0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 293580 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 12, val 0)(children: LongColModuloLongScalar(col 3, val 500) -> 12:long) -> boolean, FilterDoubleColGreaterEqualDoubleScalar(col 13, val -1.0)(children: FuncSinDoubleToDouble(col 4) -> 13:double) -> boolean) -> boolean
-                    predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean)
-                    Statistics: Num rows: 2048 Data size: 48960 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [5, 13, 12, 14, 15, 16, 18, 17, 19, 20, 21, 23, 22, 24, 25, 26, 27, 28, 30, 31, 29, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 5, 3, 46, 47, 48, 49]
-                          selectExpressions: RoundWithNumDigitsDoubleToDouble(col 5, decimalPlaces 2) -> 13:double, FuncFloorDoubleToLong(col 5) -> 12:long, FuncCeilDoubleToLong(col 5) -> 14:long, FuncRandNoSeed -> 15:double, FuncRand -> 16:double, FuncExpDoubleToDouble(col 17)(children: FuncLnDoubleToDouble(col 5) -> 17:double) -> 18:double, FuncLnDoubleToDouble(col 5) -> 17:double, FuncLnDoubleToDouble(col 4) -> 19:double, FuncLog10DoubleToDouble(col 5) -> 20:double, FuncLog2DoubleToDouble(col 5) -> 21:double, FuncLog2DoubleToDouble(col 22)(children: DoubleColSubtractDoubleScalar(col 5, val 15601.0) -> 22:double) -> 23:double, FuncLog2DoubleToDouble(col 4) -> 22:double, FuncLog2LongToDouble(col 3) -> 24:double, FuncLog2LongToDouble(col 2) -> 25:double, FuncLog2LongToDouble(col 1) -> 26:double, FuncLog2LongToDouble(col 0) -> 27:double, VectorUDFAdaptor(log(2, cdouble)) -> 28:Double, VectorUDFAdaptor(power(log2(cdouble), 2))(children: FuncLog2DoubleToDouble(col 5) -> 29:double) -> 30:Double, VectorUDFAdaptor(power(log2(cdouble), 2))(children: FuncLog2DoubleToDouble(col 5) -> 29:double) -> 31:Double, FuncSqrtDoubleToDouble(col 5) -> 29:double, FuncSqrtLongToDouble(col 3) -> 32:double, FuncBin(col 3) -> 33:String, VectorUDFAdaptor(hex(cdouble)) -> 34:String, VectorUDFAdaptor(conv(cbigint, 10, 16)) -> 35:String, FuncAbsDoubleToDouble(col 5) -> 36:double, FuncAbsLongToLong(col 0) -> 37:long, PosModLongToLong(col 2, divisor 3) -> 38:long, FuncSinDoubleToDouble(col 5) -> 39:double, FuncASinDoubleToDouble(col 5) -> 40:double, FuncCosDoubleToDouble(col 5) -> 41:double, FuncACosDoubleToDouble(col 5) -> 42:double, FuncATanDoubleToDouble(col 5) -> 43:double, FuncDegreesDoubleToDouble(col 5) -> 44:double, FuncRadiansDoubleToDouble(col 5) -> 45:double, DoubleColUnaryMinus(col 5) -> 46:double, FuncSignDoubleToDouble(col 5) -> 47:double, FuncSignLongToDouble(col 3) -> 48:double, FuncCosDoubleToDouble(col 50)(children: DoubleColAddDoubleScalar(col 49, val 3.14159)(children: DoubleColUnaryMinus(col 50)(children: FuncSinDoubleToDouble(col 49)(children: FuncLnDoubleToDouble(col 5) -> 49:double) -> 50:double) -> 49:double) -> 50:double) -> 49:double
-                      Statistics: Num rows: 2048 Data size: 1724272 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 2048 Data size: 1724272 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean)
+            Select Operator
+              expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40
+              ListSink
 
 PREHOOK: query: select
    cdouble

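Note that the vectorized_math_funcs.q.out hunk changes more than annotations: the old expected output ran the query as a Tez vertex (Stage-1) whose selectExpressions line maps each math function to a vectorized primitive, with VectorUDFAdaptor fallbacks for log(2, x), power, hex, and conv, whereas the new output answers the same query with a bare fetch task (Stage-0: TableScan, Filter, Select, ListSink). Whether such a filter-and-project query may run as a fetch task is governed by a standard Hive setting; the name below is an assumption, since it does not appear in the diff:

    set hive.fetch.task.conversion=more;
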
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
index 93a68e9..800cbb6 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
@@ -1,15 +1,11 @@
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
-explain vectorization select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint
+explain select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SORT_QUERY_RESULTS
 
-explain vectorization select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint
+explain select sum(t1.td) from (select  v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -70,14 +66,6 @@ STAGE PLANS:
                                 value expressions: _col0 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -98,14 +86,6 @@ STAGE PLANS:
                         value expressions: _col1 (type: smallint), _col2 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -125,23 +105,8 @@ STAGE PLANS:
                         Statistics: Num rows: 9174 Data size: 27400 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)

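After the revert, the only vectorization signal left in a plan is the per-vertex Execution mode line (for example "vectorized, llap", which all three Map vertices and Reducer 2 keep in the hunks above). The nested-mapjoin statement reverts to plain EXPLAIN; reformatted from the hunk:

    EXPLAIN
    SELECT sum(t1.td)
    FROM (SELECT v1.csmallint AS tsi, v1.cdouble AS td
          FROM alltypesorc v1, alltypesorc v2
          WHERE v1.ctinyint = v2.ctinyint) t1
    JOIN alltypesorc v3 ON t1.tsi = v3.csmallint;
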
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_parquet.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_parquet.q.out b/ql/src/test/results/clientpositive/llap/vectorized_parquet.q.out
index 6285c99..8345132 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_parquet.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_parquet.q.out
@@ -46,20 +46,16 @@ POSTHOOK: Lineage: alltypes_parquet.cint SIMPLE [(alltypesorc)alltypesorc.FieldS
 POSTHOOK: Lineage: alltypes_parquet.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: alltypes_parquet.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: alltypes_parquet.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: explain vectorization select * 
+PREHOOK: query: explain select * 
   from alltypes_parquet
   where cint = 528534767 
   limit 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select * 
+POSTHOOK: query: explain select * 
   from alltypes_parquet
   where cint = 528534767 
   limit 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -103,7 +99,7 @@ POSTHOOK: Input: default@alltypes_parquet
 528534767	27	-7824	27.0	-7824.0	cvLH6Eat2yFsyy7p
 528534767	-11	-15431	-11.0	-15431.0	cvLH6Eat2yFsyy7p
 528534767	61	-15549	61.0	-15549.0	cvLH6Eat2yFsyy7p
-PREHOOK: query: explain vectorization select ctinyint, 
+PREHOOK: query: explain select ctinyint, 
   max(cint), 
   min(csmallint), 
   count(cstring1), 
@@ -112,7 +108,7 @@ PREHOOK: query: explain vectorization select ctinyint,
   from alltypes_parquet
   group by ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select ctinyint, 
+POSTHOOK: query: explain select ctinyint, 
   max(cint), 
   min(csmallint), 
   count(cstring1), 
@@ -121,10 +117,6 @@ POSTHOOK: query: explain vectorization select ctinyint,
   from alltypes_parquet
   group by ctinyint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -160,17 +152,8 @@ STAGE PLANS:
                         value expressions: _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,input:float>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: llap
             LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: hive.vectorized.use.row.serde.deserialize IS false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:float> of Column[VALUE._col3] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), min(VALUE._col1), count(VALUE._col2), avg(VALUE._col3), stddev_pop(VALUE._col4)

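Worth noting before the next file: the vectorized_parquet.q.out hunk above also deletes the negative diagnostics, namely the map side reporting enabled: false (enabledConditionsNotMet on hive.vectorized.use.row.serde.deserialize for MapredParquetInputFormat) and the reducer's notVectorizedReason for the avg() intermediate struct<count:bigint,sum:double,input:float>. A hedged reconstruction of the aggregate query behind those lines; the avg and stddev_pop argument columns are guesses inferred from the struct types above, since the middle of the query text is elided in this hunk:

    explain select ctinyint,
      max(cint),
      min(csmallint),
      count(cstring1),
      avg(cfloat),         -- assumed: matches struct<count:bigint,sum:double,input:float>
      stddev_pop(cdouble)  -- assumed: matches struct<count:bigint,sum:double,variance:double>
    from alltypes_parquet
    group by ctinyint;

Either way, the revert only changes what EXPLAIN prints; the surviving "Execution mode: llap" line on Reducer 2 still shows that the reducer is not vectorized.
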
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_parquet_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_parquet_types.q.out b/ql/src/test/results/clientpositive/llap/vectorized_parquet_types.q.out
index 3c977f1..b49d5dd 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_parquet_types.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_parquet_types.q.out
@@ -118,12 +118,12 @@ POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet
 POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ]
 POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ]
 PREHOOK: query: -- select
-explain vectorization expression
+explain
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
 hex(cbinary), cdecimal FROM parquet_types
 PREHOOK: type: QUERY
 POSTHOOK: query: -- select
-explain vectorization expression
+explain
 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
 hex(cbinary), cdecimal FROM parquet_types
 POSTHOOK: type: QUERY
@@ -169,10 +169,10 @@ POSTHOOK: Input: default@parquet_types
 119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	68692CCAC0BDE7	12.83
 120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	B4F3CAFDBEDD	73.04
 121	1	2	1.1	6.3	lmn	2032-10-10 22:22:22.222222222	bcdef	abcde		90.33
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
@@ -215,7 +215,7 @@ uvwzy	5	abcdede	7	4.76	1
 vwxyz	5	abcdede	7	12.83	1
 wxyza	5	abcde	5	73.04	1
 bcdef	5	abcde	5	90.33	1
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),
@@ -227,7 +227,7 @@ FROM parquet_types
 GROUP BY ctinyint
 ORDER BY ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),

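The remaining hunks in this part all come from vector_interval_2.q.out, whose plans compare string-derived intervals with interval literals in every position: column against column, column against scalar, and scalar against column, for both year-to-month and day-to-second types. A minimal sketch of the two expression families the file exercises, built only from columns and literals that appear in the hunks below:

    select
      interval_year_month(str1) = interval '1-2' year to month,
      interval_day_time(str3) <> interval '1 2:3:4' day to second
    from vector_interval_2;

The reverted output keeps these comparisons in the Select Operator expressions but drops the VectorSelectOperator detail that named the generated vector classes (IntervalYearMonthColEqualIntervalYearMonthScalar and friends).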

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
index 61702bd..23a977e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
@@ -44,7 +44,7 @@ POSTHOOK: Lineage: vector_interval_2.str4 EXPRESSION []
 POSTHOOK: Lineage: vector_interval_2.ts EXPRESSION []
 PREHOOK: query: -- interval comparisons in select clause
 
-explain vectorization expression
+explain
 select
   str1,
   -- Should all be true
@@ -78,7 +78,7 @@ from vector_interval_2 order by str1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- interval comparisons in select clause
 
-explain vectorization expression
+explain
 select
   str1,
   -- Should all be true
@@ -110,10 +110,6 @@ select
   interval '1-2' year to month != interval_year_month(str2)
 from vector_interval_2 order by str1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -131,61 +127,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                    expressions: str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) (type: boolean), (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
-                        selectExpressions: LongColEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 8:long, LongColLessEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 9:long, LongColLessEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> 10:long, LongColLessLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> 11:long, LongColGreaterEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 12:long, LongColGreaterEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 13:long, LongColGreaterLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 14:long, LongColNotEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> 15:long, IntervalYearMonthColEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 16:long, IntervalYearMonthColLessEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 17:long, IntervalYearMonthColLessEqualIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 18:long, IntervalYearMonthColLessIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 19:long, IntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 20:long, IntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 21:long, IntervalYearMonthColGreaterIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 22:long, IntervalYearMonthColNotEqualIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 23:long, IntervalYearMonthScalarEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 24:long, IntervalYearMonthScalarLessEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 25:long, IntervalYearMonthScalarLessEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 26:long, IntervalYearMonthScalarLessIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 27:long, IntervalYearMonthScalarGreaterEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 28:long, IntervalYearMonthScalarGreaterEqualIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 29:long, IntervalYearMonthScalarGreaterIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 30:long, IntervalYearMonthScalarNotEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 31:long
                     Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean), _col18 (type: boolean), _col19 (type: boolean), _col20 (type: boolean), _col21 (type: boolean), _col22 (type: boolean), _col23 (type: boolean), _col24 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean), VALUE._col12 (type: boolean), VALUE._col13 (type: boolean), VALUE._col14 (type: boolean), VALUE._col15 (type: boolean), VALUE._col16 (type: boolean), VALUE._col17 (type: boolean), VALUE._col18 (type: boolean), VALUE._col19 (type: boolean), VALUE._col20 (type: boolean), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean), VALUE._col23 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
                 Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -266,7 +227,7 @@ POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 1-2	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   str1,
   -- Should all be false
@@ -292,7 +253,7 @@ select
   interval '1-2' year to month != interval_year_month(str1)
 from vector_interval_2 order by str1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   str1,
   -- Should all be false
@@ -318,10 +279,6 @@ select
   interval '1-2' year to month != interval_year_month(str1)
 from vector_interval_2 order by str1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -339,61 +296,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                    expressions: str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < 1-2) (type: boolean), (1-2 <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > 1-3) (type: boolean)
                     outputColumnNames: _col0, _col1, _col10, _col11, _col13, _col14, _col15, _col16, _col17, _col2, _col3, _col4, _col5, _col7, _col8, _col9
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                        selectExpressions: LongColNotEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 8:long, IntervalYearMonthColLessEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 9:long, IntervalYearMonthColLessIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 10:long, IntervalYearMonthScalarNotEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 11:long, IntervalYearMonthScalarGreaterEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 12:long, IntervalYearMonthScalarGreaterIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> 13:long, IntervalYearMonthScalarLessEqualIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 14:long, IntervalYearMonthScalarLessIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 15:long, LongColGreaterEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> 16:long, LongColGreaterLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> 17:long, LongColLessEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 18:long, LongColLessLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> 19:long, IntervalYearMonthColNotEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 20:long, IntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 21:long, IntervalYearMonthColGreaterIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 22:long
                     Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col0 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean), VALUE._col9 (type: boolean), VALUE._col5 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean), VALUE._col12 (type: boolean), VALUE._col13 (type: boolean), VALUE._col14 (type: boolean), VALUE._col10 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 1, 6, 7, 8, 9, 10, 6, 11, 12, 13, 14, 15, 11]
                 Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -462,7 +384,7 @@ POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 1-2	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   str3,
   -- Should all be true
@@ -494,7 +416,7 @@ select
   interval '1 2:3:4' day to second != interval_day_time(str4)
 from vector_interval_2 order by str3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   str3,
   -- Should all be true
@@ -526,10 +448,6 @@ select
   interval '1 2:3:4' day to second != interval_day_time(str4)
 from vector_interval_2 order by str3
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -547,61 +465,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                    expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) (type: boolean), (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
-                        selectExpressions: IntervalDayTimeColEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 8:long, IntervalDayTimeColLessEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 9:long, IntervalDayTimeColLessEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> 10:long, IntervalDayTimeColLessIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> 11:long, IntervalDayTimeColGreaterEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 12:long, IntervalDayTimeColGreaterEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 13:long, IntervalDayTimeColGreaterIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 14:long, IntervalDayTimeColNotEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> 15:long, IntervalDayTimeColEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 16:long, IntervalDayTimeColLessEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 17:long, IntervalDayTimeColLessEqualIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 18:long, IntervalDayTimeColLessIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 19:long, IntervalDayTimeColGreaterEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 20:long, IntervalDayTimeColGreaterEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 21:long, IntervalDayTimeColGreaterIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 22:long, IntervalDayTimeColNotEqualIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 23:long, IntervalDayTimeScalarEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 24:long, IntervalDayTimeScalarLessEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 25:long, IntervalDayTimeScalarLessEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 26:long, IntervalDayTimeScalarLessIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 27:long, IntervalDayTimeScalarGreaterEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 28:long, IntervalDayTimeScalarGreaterEqualIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 29:long, IntervalDayTimeScalarGreaterIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 30:long, IntervalDayTimeScalarNotEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 31:long
                     Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean), _col18 (type: boolean), _col19 (type: boolean), _col20 (type: boolean), _col21 (type: boolean), _col22 (type: boolean), _col23 (type: boolean), _col24 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean), VALUE._col12 (type: boolean), VALUE._col13 (type: boolean), VALUE._col14 (type: boolean), VALUE._col15 (type: boolean), VALUE._col16 (type: boolean), VALUE._col17 (type: boolean), VALUE._col18 (type: boolean), VALUE._col19 (type: boolean), VALUE._col20 (type: boolean), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean), VALUE._col23 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
                 Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -682,7 +565,7 @@ POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 1 2:3:4	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true	true
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
   str3,
   -- Should all be false
@@ -708,7 +591,7 @@ select
   interval '1 2:3:4' day to second != interval_day_time(str3)
 from vector_interval_2 order by str3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
   str3,
   -- Should all be false
@@ -734,10 +617,6 @@ select
   interval '1 2:3:4' day to second != interval_day_time(str3)
 from vector_interval_2 order by str3
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -755,61 +634,26 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Select Operator
                    expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < 1 02:03:04.000000000) (type: boolean), (1 02:03:04.000000000 <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > 1 02:03:05.000000000) (type: boolean)
                     outputColumnNames: _col0, _col1, _col10, _col11, _col13, _col14, _col15, _col16, _col17, _col2, _col3, _col4, _col5, _col7, _col8, _col9
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                        selectExpressions: IntervalDayTimeColNotEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 8:long, IntervalDayTimeColLessEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 9:long, IntervalDayTimeColLessIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 10:long, IntervalDayTimeScalarNotEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 11:long, IntervalDayTimeScalarGreaterEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 12:long, IntervalDayTimeScalarGreaterIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> 13:long, IntervalDayTimeScalarLessEqualIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 14:long, IntervalDayTimeScalarLessIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 15:long, IntervalDayTimeColGreaterEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> 16:long, IntervalDayTimeColGreaterIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> 17:long, IntervalDayTimeColLessEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 18:long, IntervalDayTimeColLessIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> 19:long, IntervalDayTimeColNotEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 20:long, IntervalDayTimeColGreaterEqualIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 21:long, IntervalDayTimeColGreaterIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> 22:long
                     Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col0 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean), VALUE._col9 (type: boolean), VALUE._col5 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: boolean), VALUE._col12 (type: boolean), VALUE._col13 (type: boolean), VALUE._col14 (type: boolean), VALUE._col10 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 1, 6, 7, 8, 9, 10, 6, 11, 12, 13, 14, 15, 11]
                 Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -879,7 +723,7 @@ POSTHOOK: Input: default@vector_interval_2
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 1 2:3:4	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false	false
 PREHOOK: query: -- interval expressions in predicates
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   interval_year_month(str1) = interval_year_month(str1)
@@ -905,7 +749,7 @@ where
 order by ts
 PREHOOK: type: QUERY
 POSTHOOK: query: -- interval expressions in predicates
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   interval_year_month(str1) = interval_year_month(str1)
@@ -930,10 +774,6 @@ where
   and interval '1-3' year to month > interval_year_month(str1)
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -951,66 +791,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> boolean, FilterLongColNotEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> boolean, FilterLongColLessEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> boolean, FilterLongColLessLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 3) -> 7:interval_year_month) -> boolean, FilterLongColGreaterEqualLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> boolean, FilterLongColGreaterLongColumn(col 6, col 7)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month, CastStringToIntervalYearMonth(col 2) -> 7:interval_year_month) -> boolean, FilterIntervalYearMonthColEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthColNotEqualIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthColLessEqualIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthColLessIntervalYearMonthScalar(col 6, val 15)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthColGreaterIntervalYearMonthScalar(col 6, val 14)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarNotEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarLessEqualIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarLessIntervalYearMonthColumn(val 14, col 6)(children: CastStringToIntervalYearMonth(col 3) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarGreaterEqualIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean, FilterIntervalYearMonthScalarGreaterIntervalYearMonthColumn(val 15, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> boolean) -> boolean
                    predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1076,7 +878,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 2001-01-01 01:02:03
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select ts from vector_interval_2
 where
   interval_day_time(str3) = interval_day_time(str3)
@@ -1101,7 +903,7 @@ where
   and interval '1 2:3:5' day to second > interval_day_time(str3)
 order by ts
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select ts from vector_interval_2
 where
   interval_day_time(str3) = interval_day_time(str3)
@@ -1126,10 +928,6 @@ where
   and interval '1 2:3:5' day to second > interval_day_time(str3)
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1147,66 +945,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterIntervalDayTimeColEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColNotEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColLessEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColLessIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time, CastStringToIntervalDayTime(col 5) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColGreaterEqualIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColGreaterIntervalDayTimeColumn(col 6, col 7)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time, CastStringToIntervalDayTime(col 4) -> 7:interval_day_time) -> boolean, FilterIntervalDayTimeColEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeColNotEqualIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeColLessEqualIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeColLessIntervalDayTimeScalar(col 6, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeColGreaterEqualIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeColGreaterIntervalDayTimeScalar(col 6, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarNotEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarLessEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarLessIntervalDayTimeColumn(val 1 02:03:04.000000000, col 6)(children: CastStringToIntervalDayTime(col 5) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarGreaterEqualIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean, FilterIntervalDayTimeScalarGreaterIntervalDayTimeColumn(val 1 02:03:05.000000000, col 6)(children: CastStringToIntervalDayTime(col 4) -> 6:interval_day_time) -> boolean) -> boolean
                     predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) and (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1272,7 +1032,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 2001-01-01 01:02:03
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select ts from vector_interval_2
 where
   date '2002-03-01' = dt + interval_year_month(str1)
@@ -1292,7 +1052,7 @@ where
   and dt != dt + interval '1-2' year to month
 order by ts
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select ts from vector_interval_2
 where
   date '2002-03-01' = dt + interval_year_month(str1)
@@ -1312,10 +1072,6 @@ where
   and dt != dt + interval '1-2' year to month
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1333,66 +1089,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterDateScalarEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateScalarLessEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateScalarGreaterEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateColEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateColLessEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateColGreaterEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterLongColNotEqualLongColumn(col 1, col 7)(children: DateColAddIntervalYearMonthColumn(col 1, col 6)(children: CastStringToIntervalYearMonth(col 2) -> 6:interval_year_month) -> 7:long) -> boolean, FilterDateScalarEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterDateScalarLessEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterDateScalarGreaterEqualDateColumn(val 11747, col 7)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterDateColEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterDateColLessEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterDateColGreaterEqualDateScalar(col 7, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean, FilterLongColNotEqualLongColumn(col 1, col 7)(children: DateColAddIntervalYearMonthScalar(col 1, val 1-2) -> 7:long) -> boolean) -> boolean
                     predicate: ((2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 = (dt + 1-2)) and (2002-03-01 <= (dt + 1-2)) and (2002-03-01 >= (dt + 1-2)) and ((dt + 1-2) = 2002-03-01) and ((dt + 1-2) <= 2002-03-01) and ((dt + 1-2) >= 2002-03-01) and (dt <> (dt + 1-2))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1448,7 +1166,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 2001-01-01 01:02:03
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select ts from vector_interval_2
 where
   timestamp '2002-03-01 01:02:03' = ts + interval '1-2' year to month
@@ -1473,7 +1191,7 @@ where
   and ts > ts - interval '1' year
 order by ts
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select ts from vector_interval_2
 where
   timestamp '2002-03-01 01:02:03' = ts + interval '1-2' year to month
@@ -1498,10 +1216,6 @@ where
   and ts > ts - interval '1' year
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1519,66 +1233,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampScalarLessEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampScalarGreaterEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampScalarNotEqualTimestampColumn(val 2002-04-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampScalarLessTimestampColumn(val 2002-02-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampScalarGreaterTimestampColumn(val 2002-04-01 01:02:03.0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColEqualTimestampScalar(col 6, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColGreaterEqualTimestampScalar(col 6, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColLessEqualTimestampScalar(col 6, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColNotEqualTimestampScalar(col 6, val 2002-04-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColGreaterTimestampScalar(col 6, val 2002-02-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColLessTimestampScalar(col 6, val 2002-04-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-2) -> 6:timestamp) -> boolean, FilterTimestampColEqualTimestampColumn(col 0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 0-0) -> 6:timestamp) -> boolean, FilterTimestampColNotEqualTimestampColumn(col 0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-0) -> 6:timestamp) -> boolean, FilterTimestampColLessEqualTimestampColumn(col 0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-0) -> 6:timestamp) -> boolean, FilterTimestampColLessTimestampColumn(col 0, col 6)(children: TimestampColAddIntervalYearMonthScalar(col 0, val 1-0) -> 6:timestamp) -> boolean, FilterTimestampColGreaterEqualTimestampColumn(col 0, col 6)(children: TimestampColSubtractIntervalYearMonthScalar(col 0, val 1-0) -> 6:timestamp) -> boolean, FilterTimestampColGreaterTimestampColumn(col 0, col 6)(children: TimestampColSubtractIntervalYearMonthScalar(col 0, val 1-0) -> 6:timestamp) -> boolean) -> boolean
                     predicate: ((2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2)) and (2002-03-01 01:02:03.0 >= (ts + 1-2)) and (2002-04-01 01:02:03.0 <> (ts + 1-2)) and (2002-02-01 01:02:03.0 < (ts + 1-2)) and (2002-04-01 01:02:03.0 > (ts + 1-2)) and ((ts + 1-2) = 2002-03-01 01:02:03.0) and ((ts + 1-2) >= 2002-03-01 01:02:03.0) and ((ts + 1-2) <= 2002-03-01 01:02:03.0) and ((ts + 1-2) <> 2002-04-01 01:02:03.0) and ((ts + 1-2) > 2002-02-01 01:02:03.0) and ((ts + 1-2) < 2002-04-01 01:02:03.0) and (ts = (ts + 0-0)) and (ts <> (ts + 1-0)) and (ts <= (ts + 1-0)) and (ts < (ts + 1-0)) and (ts >= (ts - 1-0)) and (ts > (ts - 1-0))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1645,7 +1321,7 @@ POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 2001-01-01 01:02:03
 PREHOOK: query: -- day to second expressions in predicate
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = dt + interval '0 1:2:3' day to second
@@ -1671,7 +1347,7 @@ where
 order by ts
 PREHOOK: type: QUERY
 POSTHOOK: query: -- day to second expressions in predicate
-explain vectorization expression
+explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = dt + interval '0 1:2:3' day to second
@@ -1696,10 +1372,6 @@ where
   and ts > dt - interval '0 1:2:4' day to second
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1717,66 +1389,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarNotEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarLessEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarGreaterEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColEqualTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColNotEqualTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColGreaterEqualTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColGreaterTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColLessEqualTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColLessTimestampScalar(col 6, val 2001-01-01 01:02:03.0)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColEqualTimestampColumn(col 0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColNotEqualTimestampColumn(col 0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColLessEqualTimestampColumn(col 0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColLessTimestampColumn(col 0, col 6)(children: DateColAddIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean, FilterTimestampColGreaterEqualTimestampColumn(col 0, col 6)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:03.000000000) -> 6:timestamp) -> boolean, FilterTimestampColGreaterTimestampColumn(col 0, col 6)(children: DateColSubtractIntervalDayTimeScalar(col 1, val 0 01:02:04.000000000) -> 6:timestamp) -> boolean) -> boolean
                     predicate: ((2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000)) and ((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0) and (ts = (dt + 0 01:02:03.000000000)) and (ts <> (dt + 0 01:02:04.000000000)) and (ts <= (dt + 0 01:02:03.000000000)) and (ts < (dt + 0 01:02:04.000000000)) and (ts >= (dt - 0 01:02:03.000000000)) and (ts > (dt - 0 01:02:04.000000000))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
                       outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0]
                       Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: timestamp)
                         sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1842,7 +1476,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@vector_interval_2
 #### A masked pattern was here ####
 2001-01-01 01:02:03
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = ts + interval '0' day
@@ -1867,7 +1501,7 @@ where
   and ts > ts - interval '1' day
 order by ts
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select ts from vector_interval_2
 where
   timestamp '2001-01-01 01:02:03' = ts + interval '0' day
@@ -1892,10 +1526,6 @@ where
   and ts > ts - interval '1' day
 order by ts
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1913,66 +1543,28 @@ STAGE PLANS:
                 TableScan
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: TimestampColAddIntervalDayTimeScalar(col 0, val 0 00:00:00.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarNotEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: TimestampColAddIntervalDayTimeScalar(col 0, val 1 00:00:00.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarLessEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: TimestampColAddIntervalDayTimeScalar(col 0, val 1 00:00:00.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03.0, col 6)(children: TimestampColAddIntervalDayTimeScalar(col 0, val 1 00:00:00.000000000) -> 6:timestamp) -> boolean, FilterTimestampScalarGreaterEqualTimestampColumn(val 2001

<TRUNCATED>
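
The hunk cut off above is the timestamp-plus-day-interval variant of the same pattern: every '-' block in these vector_interval_2.q.out hunks is per-operator detail (Filter Vectorization, predicateExpression, Map/Reduce Vectorization) that only the richer EXPLAIN form printed. As a minimal sketch of the kind of statement behind these plans, assuming the vector_interval_2 fixture from this test (its DDL sits outside this excerpt), run on a build that still has the feature:

    -- hypothetical session; uses only queries and settings shown in these test outputs
    set hive.vectorized.execution.enabled=true;
    explain vectorization expression
    select ts from vector_interval_2
    where ts = ts + interval '0' day
      and ts > ts - interval '1' day
    order by ts;

After this revert the same .q file issues plain 'explain', so the expected output loses those blocks.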

[19/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
index aa8ed4a..b926ab4b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
@@ -1,82 +1,31 @@
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cdouble, nvl(cdouble, 100) as n
+PREHOOK: query: EXPLAIN SELECT cdouble, nvl(cdouble, 100) as n
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cdouble, nvl(cdouble, 100) as n
+POSTHOOK: query: EXPLAIN SELECT cdouble, nvl(cdouble, 100) as n
 FROM alltypesorc
 WHERE (cdouble IS NULL)
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNull(col 5) -> boolean
-                    predicate: cdouble is null (type: boolean)
-                    Statistics: Num rows: 3114 Data size: 18608 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: null (type: double), 100.0 (type: double)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [12, 13]
-                          selectExpressions: ConstantVectorExpression(val null) -> 12:double, ConstantVectorExpression(val 100.0) -> 13:double
-                      Statistics: Num rows: 3114 Data size: 24920 Basic stats: COMPLETE Column stats: COMPLETE
-                      Limit
-                        Number of rows: 10
-                        Limit Vectorization:
-                            className: VectorLimitOperator
-                            native: true
-                        Statistics: Num rows: 10 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          File Sink Vectorization:
-                              className: VectorFileSinkOperator
-                              native: false
-                          Statistics: Num rows: 10 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: cdouble is null (type: boolean)
+            Select Operator
+              expressions: null (type: double), 100.0 (type: double)
+              outputColumnNames: _col0, _col1
+              Limit
+                Number of rows: 10
+                ListSink
 
 PREHOOK: query: SELECT cdouble, nvl(cdouble, 100) as n
 FROM alltypesorc
@@ -102,76 +51,30 @@ NULL	100.0
 NULL	100.0
 NULL	100.0
 NULL	100.0
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cfloat, nvl(cfloat, 1) as n
+PREHOOK: query: EXPLAIN SELECT cfloat, nvl(cfloat, 1) as n
 FROM alltypesorc
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cfloat, nvl(cfloat, 1) as n
+POSTHOOK: query: EXPLAIN SELECT cfloat, nvl(cfloat, 1) as n
 FROM alltypesorc
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Select Operator
-                    expressions: cfloat (type: float), NVL(cfloat,1) (type: float)
-                    outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4, 13]
-                        selectExpressions: VectorCoalesce(columns [4, 12])(children: col 4, ConstantVectorExpression(val 1.0) -> 12:double) -> 13:float
-                    Statistics: Num rows: 12288 Data size: 85848 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 10
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 10 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 10 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: cfloat (type: float), NVL(cfloat,1) (type: float)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 10
+              ListSink
 
 PREHOOK: query: SELECT cfloat, nvl(cfloat, 1) as n
 FROM alltypesorc
@@ -195,76 +98,30 @@ NULL	1.0
 27.0	27.0
 -11.0	-11.0
 61.0	61.0
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, 10) as n
+PREHOOK: query: EXPLAIN SELECT nvl(null, 10) as n
 FROM alltypesorc
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, 10) as n
+POSTHOOK: query: EXPLAIN SELECT nvl(null, 10) as n
 FROM alltypesorc
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Select Operator
-                    expressions: 10 (type: int)
-                    outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [12]
-                        selectExpressions: ConstantVectorExpression(val 10) -> 12:long
-                    Statistics: Num rows: 12288 Data size: 49152 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 10
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: 10 (type: int)
+            outputColumnNames: _col0
+            Limit
+              Number of rows: 10
+              ListSink
 
 PREHOOK: query: SELECT nvl(null, 10) as n
 FROM alltypesorc
@@ -288,60 +145,30 @@ POSTHOOK: Input: default@alltypesorc
 10
 10
 10
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, null) as n
+PREHOOK: query: EXPLAIN SELECT nvl(null, null) as n
 FROM alltypesorc
 LIMIT 10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT nvl(null, null) as n
+POSTHOOK: query: EXPLAIN SELECT nvl(null, null) as n
 FROM alltypesorc
 LIMIT 10
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: null (type: void)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 12288 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 10
-                      Statistics: Num rows: 10 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 10 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Data type void of Const void null not supported
-                vectorized: false
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: null (type: void)
+            outputColumnNames: _col0
+            Limit
+              Number of rows: 10
+              ListSink
 
 PREHOOK: query: SELECT nvl(null, null) as n
 FROM alltypesorc

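A note on the vector_nvl.q.out hunks above: before the revert, the detailed output showed nvl(cfloat, 1) compiling to a VectorCoalesce over the column and a constant, while nvl(null, null) stayed in row mode (notVectorizedReason: Data type void of Const void null not supported). A minimal way to reproduce that level of detail on a pre-revert build, assuming the standard alltypesorc test table:

    -- hypothetical session; the query is taken verbatim from the test above
    set hive.vectorized.execution.enabled=true;
    explain vectorization expression
    select cfloat, nvl(cfloat, 1) as n
    from alltypesorc
    limit 10;
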
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
index 2073b22..503cf5b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
@@ -101,16 +101,12 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select bo, max(b) from vectortab2korc group by bo order by bo desc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select bo, max(b) from vectortab2korc group by bo order by bo desc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -129,26 +125,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: bo (type: boolean), b (type: bigint)
                     outputColumnNames: bo, b
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [7, 3]
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(b)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMaxLong(col 3) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 7
-                          native: false
-                          projectedOutputColumns: [0]
                       keys: bo (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -157,41 +139,15 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: boolean)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMaxLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: [0]
                 keys: KEY._col0 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
@@ -199,36 +155,17 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: boolean)
                   sort order: -
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: Uniform Hash IS false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: bigint)
                 outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1]
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

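For reference, the statements rewritten throughout this revert use the
EXPLAIN VECTORIZATION syntax that the reverted feature added. Only the
EXPRESSION and DETAIL modes appear in this diff; treating the full grammar
as EXPLAIN VECTORIZATION [ONLY] [SUMMARY|OPERATOR|EXPRESSION|DETAIL] is an
assumption taken from the feature's JIRA (HIVE-11394), not something shown
here. A minimal HiveQL sketch:

    -- Vectorization must be on; the property name is taken verbatim from
    -- the "enabledConditionsMet" lines in the plans above.
    SET hive.vectorized.execution.enabled=true;

    -- EXPRESSION mode, applied to a query that appears in this diff:
    EXPLAIN VECTORIZATION EXPRESSION
    select bo, max(b) from vectortab2korc group by bo order by bo desc;
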
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
index 9301a4e..6edcbeb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
@@ -62,16 +62,12 @@ POSTHOOK: Input: default@orc_table_2
 4	FOUR
 NULL	<NULL1>
 NULL	<NULL2>
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -89,16 +85,9 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: v1 (type: string), a (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -106,25 +95,12 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [1]
-                          bigTableOuterKeyMapping: 1 -> 2
-                          bigTableRetainedColumns: [0, 1, 2]
-                          bigTableValueColumns: [0, 1]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0, 1, 2, 3]
-                          smallTableMapping: [3]
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -132,61 +108,23 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: bigint, String
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: c (type: int), v2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -214,16 +152,12 @@ one	1	NULL	NULL
 one	1	NULL	NULL
 three	3	3	THREE
 two	2	2	TWO
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -241,57 +175,26 @@ STAGE PLANS:
                 TableScan
                   alias: t1
                   Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: v1 (type: string), a (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col1 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col1 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: v1:string, a:int
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: t2
                   Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: c (type: int), v2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1]
                     Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -299,25 +202,12 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [0]
-                          bigTableOuterKeyMapping: 0 -> 3
-                          bigTableRetainedColumns: [0, 1, 3]
-                          bigTableValueColumns: [0, 1]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [2, 3, 0, 1]
-                          smallTableMapping: [2]
                       outputColumnNames: _col0, _col1, _col2, _col3
                       input vertices:
                         0 Map 1
                       Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -325,20 +215,6 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: c:int, v2:string
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: String, bigint
 
   Stage: Stage-0
     Fetch Operator

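The annotations removed in these hunks (PLAN VECTORIZATION, Map/Reduce
Vectorization, and the per-operator nativeConditionsMet lists) are driven
by a handful of session properties whose names appear verbatim in the plan
text above. A sketch of the settings those plans report as met:

    -- All property names below are copied verbatim from the
    -- enabledConditionsMet / nativeConditionsMet lines in the plans above;
    -- the values are the ones those plans report.
    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    SET hive.vectorized.execution.reducesink.new.enabled=true;
    SET hive.vectorized.execution.mapjoin.native.enabled=true;
    SET hive.vectorized.use.vectorized.input.format=true;
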
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
index f3af684..cda039f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
@@ -216,22 +216,18 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select * 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
   on cd.cint = c.cint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select * 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
   on cd.cint = c.cint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -249,16 +245,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                     Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
                     Map Join Operator
                       condition map:
@@ -266,25 +255,12 @@ STAGE PLANS:
                       keys:
                         0 _col2 (type: int)
                         1 _col2 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableOuterKeyMapping: 2 -> 14
-                          bigTableRetainedColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14]
-                          bigTableValueColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
-                          smallTableMapping: [12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23]
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 32 Data size: 19648 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 32 Data size: 19648 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -292,61 +268,23 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: bigint, bigint, bigint, bigint, Double, Double, String, String, Timestamp, Timestamp, bigint, bigint
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                     Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col2 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col2 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -391,22 +329,18 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false	NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false	NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false	NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select c.ctinyint 
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -424,16 +358,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     Map Join Operator
                       condition map:
@@ -441,23 +368,12 @@ STAGE PLANS:
                       keys:
                         0 _col0 (type: tinyint)
                         1 _col0 (type: tinyint)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [0]
-                          bigTableRetainedColumns: [0]
-                          bigTableValueColumns: [0]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0]
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 112 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
                         Statistics: Num rows: 112 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -465,59 +381,22 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: tinyint)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
 
   Stage: Stage-0
     Fetch Operator
@@ -648,7 +527,7 @@ NULL
 NULL
 NULL
 NULL
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -657,7 +536,7 @@ left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -666,10 +545,6 @@ left outer join small_alltypesorc_a hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -688,16 +563,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 15 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint), cint (type: int)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 2]
                     Statistics: Num rows: 15 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                     Map Join Operator
                       condition map:
@@ -705,14 +573,6 @@ STAGE PLANS:
                       keys:
                         0 _col1 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableRetainedColumns: [0]
-                          bigTableValueColumns: [0]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [0]
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 3
@@ -723,163 +583,63 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: tinyint)
                           1 _col0 (type: tinyint)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [0]
-                            bigTableRetainedColumns: [0]
-                            bigTableValueColumns: [0]
-                            className: VectorMapJoinOuterLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [0]
                         outputColumnNames: _col0
                         input vertices:
                           1 Map 4
                         Statistics: Num rows: 240 Data size: 960 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
                           aggregations: count(), sum(_col0)
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint, VectorUDAFSumLong(col 0) -> bigint
-                              className: VectorGroupByOperator
-                              vectorOutput: true
-                              native: false
-                              projectedOutputColumns: [0, 1]
                           mode: hash
                           outputColumnNames: _col0, _col1
                           Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                           Reduce Output Operator
                             sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkOperator
-                                native: false
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                                nativeConditionsNotMet: Uniform Hash IS false
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: tinyint)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint
-                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), sum(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint, VectorUDAFSumLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

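Note that the revert removes only the vectorization detail from EXPLAIN
output; the unchanged context lines above still carry "Execution mode:
vectorized, llap" under each Map and Reducer vertex. A minimal way to
confirm that a vertex vectorized after the revert, reusing a query from
vector_outer_join0.q.out:

    -- Plain EXPLAIN: check the plan for "Execution mode: vectorized, llap".
    SET hive.vectorized.execution.enabled=true;
    EXPLAIN
    select t1.v1, t1.a, t2.c, t2.v2
    from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c;
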
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
index a1b14ce..051911b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
@@ -226,7 +226,7 @@ NULL	NULL	-850295959	-1887561756	NULL	NULL	WMIgGA73	4hA4KQj2vD3fI6gX82220d	NULL
 NULL	NULL	-886426182	-1887561756	NULL	NULL	0i88xYq3gx1nW4vKjp7vBp3	4hA4KQj2vD3fI6gX82220d	NULL	1969-12-31 16:00:04.472	true	false
 NULL	NULL	-899422227	-1645852809	NULL	NULL	73xdw4X	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:07.395	false	false
 NULL	NULL	-971543377	-1645852809	NULL	NULL	uN803aW	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:05.43	false	false
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -235,7 +235,7 @@ left outer join small_alltypesorc_a hd
   on hd.cbigint = c.cbigint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -244,10 +244,6 @@ left outer join small_alltypesorc_a hd
   on hd.cbigint = c.cbigint
 ) t1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -266,16 +262,9 @@ STAGE PLANS:
                 TableScan
                   alias: c
                   Statistics: Num rows: 20 Data size: 132 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int), cbigint (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3]
                     Statistics: Num rows: 20 Data size: 132 Basic stats: COMPLETE Column stats: COMPLETE
                     Map Join Operator
                       condition map:
@@ -283,14 +272,6 @@ STAGE PLANS:
                       keys:
                         0 _col0 (type: int)
                         1 _col0 (type: int)
-                      Map Join Vectorization:
-                          bigTableKeyColumns: [2]
-                          bigTableRetainedColumns: [3]
-                          bigTableValueColumns: [3]
-                          className: VectorMapJoinOuterLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                          projectedOutputColumns: [3]
                       outputColumnNames: _col1
                       input vertices:
                         1 Map 3
@@ -301,163 +282,63 @@ STAGE PLANS:
                         keys:
                           0 _col1 (type: bigint)
                           1 _col0 (type: bigint)
-                        Map Join Vectorization:
-                            bigTableKeyColumns: [3]
-                            bigTableRetainedColumns: [3]
-                            bigTableValueColumns: [3]
-                            className: VectorMapJoinOuterLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Supports Key Types IS true, Not empty key IS true, When Fast Hash Table, then requires no Hybrid Hash Join IS true, Small table vectorizes IS true
-                            projectedOutputColumns: [3]
                         outputColumnNames: _col1
                         input vertices:
                           1 Map 4
                         Statistics: Num rows: 162 Data size: 1296 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
                           aggregations: count(), sum(_col1)
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint, VectorUDAFSumLong(col 3) -> bigint
-                              className: VectorGroupByOperator
-                              vectorOutput: true
-                              native: false
-                              projectedOutputColumns: [0, 1]
                           mode: hash
                           outputColumnNames: _col0, _col1
                           Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                           Reduce Output Operator
                             sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkOperator
-                                native: false
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                                nativeConditionsNotMet: Uniform Hash IS false
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2, 3]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: cd
                   Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cint (type: int)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2]
                     Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: hd
                   Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Select Operator
                     expressions: cbigint (type: bigint)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [3]
                     Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: bigint)
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkLongOperator
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [3]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint
-                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), sum(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0) -> bigint, VectorUDAFSumLong(col 1) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat


[46/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 6167f48..3a179a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.optimizer.physical;
 import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNIFORM;
 
 import java.io.Serializable;
-import java.lang.annotation.Annotation;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -34,7 +33,6 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.Stack;
 import java.util.regex.Pattern;
-import org.apache.commons.lang.ArrayUtils;
 
 import org.apache.calcite.util.Pair;
 import org.apache.commons.lang.ArrayUtils;
@@ -45,8 +43,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.*;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
-import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
-import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
 import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
@@ -66,11 +62,7 @@ import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOpe
 import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkLongOperator;
 import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkMultiKeyOperator;
 import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkStringOperator;
-import org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type;
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping;
-import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping;
-import org.apache.hadoop.hive.ql.exec.vector.VectorFilterOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
@@ -81,7 +73,6 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.IdentityExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -100,36 +91,18 @@ import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
-import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
-import org.apache.hadoop.hive.ql.plan.HashTableSinkDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
-import org.apache.hadoop.hive.ql.plan.LimitDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.MapWork;
-import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.SelectDesc;
-import org.apache.hadoop.hive.ql.plan.VectorAppMasterEventDesc;
-import org.apache.hadoop.hive.ql.plan.VectorFileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.VectorFilterDesc;
-import org.apache.hadoop.hive.ql.plan.VectorTableScanDesc;
-import org.apache.hadoop.hive.ql.plan.VectorizationCondition;
 import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;
-import org.apache.hadoop.hive.ql.plan.VectorSparkHashTableSinkDesc;
-import org.apache.hadoop.hive.ql.plan.VectorLimitDesc;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo;
 import org.apache.hadoop.hive.ql.plan.VectorPartitionConversion;
-import org.apache.hadoop.hive.ql.plan.VectorSMBJoinDesc;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
@@ -144,13 +117,10 @@ import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKeyType;
 import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.OperatorVariation;
 import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc.VectorDeserializeType;
-import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo;
 import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo;
 import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc;
-import org.apache.hadoop.hive.ql.plan.VectorSelectDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.ql.udf.UDFAcos;
 import org.apache.hadoop.hive.ql.udf.UDFAsin;
@@ -200,9 +170,6 @@ import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.NullStructSerDe;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
-import org.apache.hadoop.hive.serde2.SerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -215,9 +182,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
-import org.apache.hive.common.util.AnnotationUtils;
-import org.apache.hive.common.util.HiveStringUtils;
-import org.apache.hive.common.util.ReflectionUtil;
 
 import com.google.common.base.Preconditions;
 
@@ -270,39 +234,12 @@ public class Vectorizer implements PhysicalPlanResolver {
 
   private boolean isSpark;
 
-  private boolean useVectorizedInputFileFormat;
-  private boolean useVectorDeserialize;
-  private boolean useRowDeserialize;
-  private boolean isReduceVectorizationEnabled;
+  boolean useVectorizedInputFileFormat;
+  boolean useVectorDeserialize;
+  boolean useRowDeserialize;
 
   boolean isSchemaEvolution;
 
-  private BaseWork currentBaseWork;
-  private Operator<? extends OperatorDesc> currentOperator;
-
-  public void testSetCurrentBaseWork(BaseWork testBaseWork) {
-    currentBaseWork = testBaseWork;
-  }
-
-  private void setNodeIssue(String issue) {
-    currentBaseWork.setNotVectorizedReason(
-        VectorizerReason.createNodeIssue(issue));
-  }
-
-  private void setOperatorIssue(String issue) {
-    currentBaseWork.setNotVectorizedReason(
-        VectorizerReason.createOperatorIssue(currentOperator, issue));
-  }
-
-  private void setExpressionIssue(String expressionTitle, String issue) {
-    currentBaseWork.setNotVectorizedReason(
-        VectorizerReason.createExpressionIssue(currentOperator, expressionTitle, issue));
-  }
-
-  private void clearNotVectorizedReason() {
-    currentBaseWork.setNotVectorizedReason(null);
-  }
-
   public Vectorizer() {
 
     supportedGenericUDFs.add(GenericUDFOPPlus.class);
@@ -432,10 +369,6 @@ public class Vectorizer implements PhysicalPlanResolver {
     int partitionColumnCount;
     boolean useVectorizedInputFileFormat;
 
-    boolean groupByVectorOutput;
-    boolean allNative;
-    boolean usesVectorUDFAdaptor;
-
     String[] scratchTypeNameArray;
 
     Set<Operator<? extends OperatorDesc>> nonVectorizedOps;
@@ -446,12 +379,6 @@ public class Vectorizer implements PhysicalPlanResolver {
       partitionColumnCount = 0;
     }
 
-    public void assume() {
-      groupByVectorOutput = true;
-      allNative = true;
-      usesVectorUDFAdaptor =  false;
-    }
-
     public void setAllColumnNames(List<String> allColumnNames) {
       this.allColumnNames = allColumnNames;
     }
@@ -467,19 +394,9 @@ public class Vectorizer implements PhysicalPlanResolver {
     public void setScratchTypeNameArray(String[] scratchTypeNameArray) {
       this.scratchTypeNameArray = scratchTypeNameArray;
     }
-    public void setGroupByVectorOutput(boolean groupByVectorOutput) {
-      this.groupByVectorOutput = groupByVectorOutput;
-    }
-    public void setAllNative(boolean allNative) {
-      this.allNative = allNative;
-    }
-    public void setUsesVectorUDFAdaptor(boolean usesVectorUDFAdaptor) {
-      this.usesVectorUDFAdaptor = usesVectorUDFAdaptor;
-    }
     public void setUseVectorizedInputFileFormat(boolean useVectorizedInputFileFormat) {
       this.useVectorizedInputFileFormat = useVectorizedInputFileFormat;
     }
-
     public void setNonVectorizedOps(Set<Operator<? extends OperatorDesc>> nonVectorizedOps) {
       this.nonVectorizedOps = nonVectorizedOps;
     }
@@ -511,14 +428,7 @@ public class Vectorizer implements PhysicalPlanResolver {
             scratchTypeNameArray);
       baseWork.setVectorizedRowBatchCtx(vectorizedRowBatchCtx);
 
-      if (baseWork instanceof MapWork) {
-        MapWork mapWork = (MapWork) baseWork;
-        mapWork.setUseVectorizedInputFileFormat(useVectorizedInputFileFormat);
-      }
-
-      baseWork.setAllNative(allNative);
-      baseWork.setGroupByVectorOutput(groupByVectorOutput);
-      baseWork.setUsesVectorUDFAdaptor(usesVectorUDFAdaptor);
+      baseWork.setUseVectorizedInputFileFormat(useVectorizedInputFileFormat);
     }
   }
 
@@ -535,29 +445,17 @@ public class Vectorizer implements PhysicalPlanResolver {
         throws SemanticException {
       Task<? extends Serializable> currTask = (Task<? extends Serializable>) nd;
       if (currTask instanceof MapRedTask) {
-        MapredWork mapredWork = ((MapRedTask) currTask).getWork();
-        convertMapWork(mapredWork.getMapWork(), false);
-        ReduceWork reduceWork = mapredWork.getReduceWork();
-        if (reduceWork != null) {
-          // Always set the EXPLAIN conditions.
-          setReduceWorkExplainConditions(reduceWork);
-
-          // We do not vectorize MR Reduce.
-        }
+        convertMapWork(((MapRedTask) currTask).getWork().getMapWork(), false);
       } else if (currTask instanceof TezTask) {
         TezWork work = ((TezTask) currTask).getWork();
-        for (BaseWork baseWork: work.getAllWork()) {
-          if (baseWork instanceof MapWork) {
-            convertMapWork((MapWork) baseWork, true);
-          } else if (baseWork instanceof ReduceWork) {
-            ReduceWork reduceWork = (ReduceWork) baseWork;
-
-            // Always set the EXPLAIN conditions.
-            setReduceWorkExplainConditions(reduceWork);
-
-            // We are only vectorizing Reduce under Tez/Spark.
-            if (isReduceVectorizationEnabled) {
-              convertReduceWork(reduceWork);
+        for (BaseWork w: work.getAllWork()) {
+          if (w instanceof MapWork) {
+            convertMapWork((MapWork) w, true);
+          } else if (w instanceof ReduceWork) {
+            // We are only vectorizing Reduce under Tez.
+            if (HiveConf.getBoolVar(hiveConf,
+                        HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED)) {
+              convertReduceWork((ReduceWork) w, true);
             }
           }
         }
@@ -565,43 +463,22 @@ public class Vectorizer implements PhysicalPlanResolver {
         SparkWork sparkWork = (SparkWork) currTask.getWork();
         for (BaseWork baseWork : sparkWork.getAllWork()) {
           if (baseWork instanceof MapWork) {
-            convertMapWork((MapWork) baseWork, true);
-          } else if (baseWork instanceof ReduceWork) {
-            ReduceWork reduceWork = (ReduceWork) baseWork;
-
-            // Always set the EXPLAIN conditions.
-            setReduceWorkExplainConditions(reduceWork);
-
-            if (isReduceVectorizationEnabled) {
-              convertReduceWork(reduceWork);
-            }
+            convertMapWork((MapWork) baseWork, false);
+          } else if (baseWork instanceof ReduceWork
+              && HiveConf.getBoolVar(hiveConf,
+                  HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED)) {
+            convertReduceWork((ReduceWork) baseWork, false);
           }
         }
       }
-
       return null;
     }
 
-    private void convertMapWork(MapWork mapWork, boolean isTezOrSpark) throws SemanticException {
-
-      mapWork.setVectorizationExamined(true);
-
-      // Global used when setting errors, etc.
-      currentBaseWork = mapWork;
-
+    private void convertMapWork(MapWork mapWork, boolean isTez) throws SemanticException {
       VectorTaskColumnInfo vectorTaskColumnInfo = new VectorTaskColumnInfo();
-      vectorTaskColumnInfo.assume();
-
-      boolean ret = validateMapWork(mapWork, vectorTaskColumnInfo, isTezOrSpark);
+      boolean ret = validateMapWork(mapWork, vectorTaskColumnInfo, isTez);
       if (ret) {
-        vectorizeMapWork(mapWork, vectorTaskColumnInfo, isTezOrSpark);
-      } else if (currentBaseWork.getVectorizationEnabled()) {
-        VectorizerReason notVectorizedReason  = currentBaseWork.getNotVectorizedReason();
-        if (notVectorizedReason == null) {
-          LOG.info("Cannot vectorize: unknown");
-        } else {
-          LOG.info("Cannot vectorize: " + notVectorizedReason.toString());
-        }
+        vectorizeMapWork(mapWork, vectorTaskColumnInfo, isTez);
       }
     }
 
@@ -622,7 +499,6 @@ public class Vectorizer implements PhysicalPlanResolver {
 
       LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork = mapWork.getAliasToWork();
       if ((aliasToWork == null) || (aliasToWork.size() == 0)) {
-        setNodeIssue("Vectorized map work requires work");
         return null;
       }
       int tableScanCount = 0;
@@ -631,7 +507,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       for (Entry<String, Operator<? extends OperatorDesc>> entry : aliasToWork.entrySet()) {
         Operator<?> op = entry.getValue();
         if (op == null) {
-          setNodeIssue("Vectorized map work requires a valid alias");
+          LOG.warn("Map work has invalid aliases to work with. Fail validation!");
           return null;
         }
         if (op instanceof TableScanOperator) {
@@ -641,7 +517,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         }
       }
       if (tableScanCount > 1) {
-        setNodeIssue("Vectorized map work only works with 1 TableScanOperator");
+        LOG.warn("Map work has more than 1 TableScanOperator. Fail validation!");
         return null;
       }
       return new ImmutablePair(alias, tableScanOperator);
@@ -682,6 +558,22 @@ public class Vectorizer implements PhysicalPlanResolver {
       }
     }
 
+    private String getHiveOptionsString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
+      sb.append("=");
+      sb.append(useVectorizedInputFileFormat);
+      sb.append(", ");
+      sb.append(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE.varname);
+      sb.append("=");
+      sb.append(useVectorDeserialize);
+      sb.append(", and ");
+      sb.append(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_ROW_DESERIALIZE.varname);
+      sb.append("=");
+      sb.append(useRowDeserialize);
+      return sb.toString();
+    }
+
     /*
      * There are 3 modes of reading for vectorization:
      *
@@ -696,58 +588,44 @@ public class Vectorizer implements PhysicalPlanResolver {
      *      the row object into the VectorizedRowBatch with VectorAssignRow.
      *      This picks up Input File Format not supported by the other two.
      */
-    private boolean verifyAndSetVectorPartDesc(PartitionDesc pd, boolean isAcidTable,
-        HashSet<String> inputFileFormatClassNameSet, HashSet<String> enabledConditionsMetSet,
-        ArrayList<String> enabledConditionsNotMetList) {
+    private boolean verifyAndSetVectorPartDesc(PartitionDesc pd, boolean isAcidTable) {
 
       String inputFileFormatClassName = pd.getInputFileFormatClassName();
 
-      // Always collect input file formats.
-      inputFileFormatClassNameSet.add(inputFileFormatClassName);
-
-      boolean isInputFileFormatVectorized = Utilities.isInputFileFormatVectorized(pd);
-
-      if (isAcidTable) {
-
-        // Today, ACID tables are only ORC and that format is vectorizable.  Verify these
-        // assumptions.
-        Preconditions.checkState(isInputFileFormatVectorized);
-        Preconditions.checkState(inputFileFormatClassName.equals(OrcInputFormat.class.getName()));
-
-        if (!useVectorizedInputFileFormat) {
-          enabledConditionsNotMetList.add(
-              "Vectorizing ACID tables requires " + HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
-          return false;
-        }
-
-        pd.setVectorPartitionDesc(
-            VectorPartitionDesc.createVectorizedInputFileFormat(
-                inputFileFormatClassName, Utilities.isInputFileFormatSelfDescribing(pd)));
-
-        enabledConditionsMetSet.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
-        return true;
-      }
-
       // Look for Pass-Thru case where InputFileFormat has VectorizedInputFormatInterface
       // and reads VectorizedRowBatch as a "row".
 
-      if (useVectorizedInputFileFormat) {
+      if (isAcidTable || useVectorizedInputFileFormat) {
 
-        if (isInputFileFormatVectorized) {
+        if (Utilities.isInputFileFormatVectorized(pd)) {
+
+          if (!useVectorizedInputFileFormat) {
+            LOG.info("ACID tables con only be vectorized for the input file format -- " +
+                "i.e. when Hive Configuration option " +
+                HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname +
+                "=true");
+            return false;
+          }
 
           pd.setVectorPartitionDesc(
               VectorPartitionDesc.createVectorizedInputFileFormat(
                   inputFileFormatClassName, Utilities.isInputFileFormatSelfDescribing(pd)));
 
-          enabledConditionsMetSet.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
           return true;
         }
-        // Fall through and look for other options...
+
+        // Today, ACID tables are only ORC and that format is vectorizable.  Verify this
+        // assumption.
+        Preconditions.checkState(!isAcidTable);
       }
 
-      if (!isSchemaEvolution) {
-        enabledConditionsNotMetList.add(
-            "Vectorizing tables without Schema Evolution requires " + HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
+      if (!(isSchemaEvolution || isAcidTable) &&
+        (useVectorDeserialize || useRowDeserialize)) {
+        LOG.info("Input format: " + inputFileFormatClassName + " cannot be vectorized" +
+            " when both " + HiveConf.ConfVars.HIVE_SCHEMA_EVOLUTION.varname + "=false and " +
+            " ACID table is " + isAcidTable + " and " +
+            " given the Hive Configuration options " + getHiveOptionsString());
+        return false;
       }
 
       String deserializerClassName = pd.getDeserializerClassName();
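
For orientation, the three read modes described in the comment above reduce to a priority choice among the pass-through format, vector deserialize, and row deserialize paths. The following is a minimal standalone sketch of that choice, not part of the patch: the three mode names and the three flags mirror HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT, HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE, and HIVE_VECTORIZATION_USE_ROW_DESERIALIZE from the code, while the class and method names are hypothetical.

    // Minimal sketch of the three-way read-mode choice (hypothetical names).
    public class ReadModeSketch {

      enum ReadMode { VECTORIZED_INPUT_FILE_FORMAT, VECTOR_DESERIALIZE, ROW_DESERIALIZE, NONE }

      static ReadMode choose(boolean formatIsVectorized, boolean vectorDeserializeEligible,
          boolean useVectorizedInputFileFormat, boolean useVectorDeserialize,
          boolean useRowDeserialize) {
        if (useVectorizedInputFileFormat && formatIsVectorized) {
          return ReadMode.VECTORIZED_INPUT_FILE_FORMAT; // reader hands back whole batches
        }
        if (useVectorDeserialize && vectorDeserializeEligible) {
          return ReadMode.VECTOR_DESERIALIZE;           // rows decoded straight into a batch
        }
        if (useRowDeserialize) {
          return ReadMode.ROW_DESERIALIZE;              // SerDe row, then VectorAssignRow
        }
        return ReadMode.NONE;                           // partition stays in row mode
      }

      public static void main(String[] args) {
        // An ORC-style vectorized format with the pass-through option on takes the fastest path.
        System.out.println(choose(true, false, true, true, true));
      }
    }
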
@@ -757,12 +635,6 @@ public class Vectorizer implements PhysicalPlanResolver {
       //
       // Do the "vectorized" row-by-row deserialization into a VectorizedRowBatch in the
       // VectorMapOperator.
-      boolean isTextFormat = inputFileFormatClassName.equals(TextInputFormat.class.getName()) &&
-          deserializerClassName.equals(LazySimpleSerDe.class.getName());
-      boolean isSequenceFormat =
-          inputFileFormatClassName.equals(SequenceFileInputFormat.class.getName()) &&
-          deserializerClassName.equals(LazyBinarySerDe.class.getName());
-      boolean isVectorDeserializeEligable = isTextFormat || isSequenceFormat;
 
       if (useVectorDeserialize) {
 
@@ -776,7 +648,8 @@ public class Vectorizer implements PhysicalPlanResolver {
         //    org.apache.hadoop.mapred.SequenceFileInputFormat
         //    org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-        if (isTextFormat) {
+        if (inputFileFormatClassName.equals(TextInputFormat.class.getName()) &&
+            deserializerClassName.equals(LazySimpleSerDe.class.getName())) {
 
           Properties properties = pd.getTableDesc().getProperties();
           String lastColumnTakesRestString =
@@ -786,11 +659,10 @@ public class Vectorizer implements PhysicalPlanResolver {
               lastColumnTakesRestString.equalsIgnoreCase("true"));
           if (lastColumnTakesRest) {
 
-            // If row mode will not catch this input file format, then not enabled.
+            // If row mode will not catch this, then inform.
             if (useRowDeserialize) {
-              enabledConditionsNotMetList.add(
-                  inputFileFormatClassName + " " +
-                  serdeConstants.SERIALIZATION_LAST_COLUMN_TAKES_REST + " must be disabled ");
+              LOG.info("Input format: " + inputFileFormatClassName + " cannot be vectorized" +
+                  " when " + serdeConstants.SERIALIZATION_LAST_COLUMN_TAKES_REST + "is true");
               return false;
             }
           } else {
@@ -798,19 +670,17 @@ public class Vectorizer implements PhysicalPlanResolver {
                 VectorPartitionDesc.createVectorDeserialize(
                     inputFileFormatClassName, VectorDeserializeType.LAZY_SIMPLE));
 
-            enabledConditionsMetSet.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE.varname);
             return true;
           }
-        } else if (isSequenceFormat) {
+        } else if (inputFileFormatClassName.equals(SequenceFileInputFormat.class.getName()) &&
+            deserializerClassName.equals(LazyBinarySerDe.class.getName())) {
 
           pd.setVectorPartitionDesc(
               VectorPartitionDesc.createVectorDeserialize(
                   inputFileFormatClassName, VectorDeserializeType.LAZY_BINARY));
 
-          enabledConditionsMetSet.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE.varname);
           return true;
         }
-        // Fall through and look for other options...
       }
 
       // Otherwise, if enabled, deserialize rows using regular Serde and add the object
@@ -824,29 +694,17 @@ public class Vectorizer implements PhysicalPlanResolver {
                 Utilities.isInputFileFormatSelfDescribing(pd),
                 deserializerClassName));
 
-        enabledConditionsMetSet.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_ROW_DESERIALIZE.varname);
         return true;
 
       }
 
-      if (isInputFileFormatVectorized) {
-        Preconditions.checkState(!useVectorizedInputFileFormat);
-        enabledConditionsNotMetList.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT.varname);
-      } else {
-        // Only offer these when the input file format is not the fast vectorized formats.
-        if (isVectorDeserializeEligable) {
-          Preconditions.checkState(!useVectorDeserialize);
-          enabledConditionsNotMetList.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE.varname);
-        } else {
-          // Since row mode takes everyone.
-          enabledConditionsNotMetList.add(HiveConf.ConfVars.HIVE_VECTORIZATION_USE_ROW_DESERIALIZE.varname);
-        }
-      }
- 
+      LOG.info("Input format: " + inputFileFormatClassName + " cannot be vectorized" +
+          " given the Hive Configuration options " + getHiveOptionsString());
+
       return false;
     }
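
The reverted version of verifyAndSetVectorPartDesc (the '-' lines above) threads three collections through the checks so that EXPLAIN can later report which input file formats were seen and which enabling conditions were met or missed. The bookkeeping amounts to roughly this sketch; only the three field names come from the patch, the class and method are hypothetical.

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Sketch of the EXPLAIN bookkeeping: always record the input file format,
    // then file each enabling condition under "met" or "not met".
    public class ExplainConditionsSketch {
      final Set<String> inputFileFormatClassNameSet = new HashSet<>();
      final Set<String> enabledConditionsMetSet = new HashSet<>();
      final List<String> enabledConditionsNotMetList = new ArrayList<>();

      boolean record(String inputFileFormatClassName, String conditionName, boolean holds) {
        inputFileFormatClassNameSet.add(inputFileFormatClassName);
        if (holds) {
          enabledConditionsMetSet.add(conditionName);
        } else {
          enabledConditionsNotMetList.add(conditionName);
        }
        return holds;
      }
    }
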
 
-    private ImmutablePair<Boolean, Boolean> validateInputFormatAndSchemaEvolution(MapWork mapWork, String alias,
+    private boolean validateInputFormatAndSchemaEvolution(MapWork mapWork, String alias,
         TableScanOperator tableScanOperator, VectorTaskColumnInfo vectorTaskColumnInfo)
             throws SemanticException {
 
@@ -874,39 +732,27 @@ public class Vectorizer implements PhysicalPlanResolver {
 
       LinkedHashMap<Path, ArrayList<String>> pathToAliases = mapWork.getPathToAliases();
       LinkedHashMap<Path, PartitionDesc> pathToPartitionInfo = mapWork.getPathToPartitionInfo();
-
-      // Remember the input file formats we validated and why.
-      HashSet<String> inputFileFormatClassNameSet = new HashSet<String>();
-      HashSet<String> enabledConditionsMetSet = new HashSet<String>();
-      ArrayList<String> enabledConditionsNotMetList = new ArrayList<String>();
-
       for (Entry<Path, ArrayList<String>> entry: pathToAliases.entrySet()) {
         Path path = entry.getKey();
         List<String> aliases = entry.getValue();
         boolean isPresent = (aliases != null && aliases.indexOf(alias) != -1);
         if (!isPresent) {
-          setOperatorIssue("Alias " + alias + " not present in aliases " + aliases);
-          return new ImmutablePair<Boolean,Boolean>(false, false);
+          LOG.info("Alias " + alias + " not present in aliases " + aliases);
+          return false;
         }
         PartitionDesc partDesc = pathToPartitionInfo.get(path);
         if (partDesc.getVectorPartitionDesc() != null) {
          // We have seen this already.
           continue;
         }
-        if (!verifyAndSetVectorPartDesc(partDesc, isAcidTable, inputFileFormatClassNameSet,
-            enabledConditionsMetSet, enabledConditionsNotMetList)) {
-
-          // Always set these so EXPLAIN can see.
-          mapWork.setVectorizationInputFileFormatClassNameSet(inputFileFormatClassNameSet);
-          mapWork.setVectorizationEnabledConditionsMet(new ArrayList(enabledConditionsMetSet));
-          mapWork.setVectorizationEnabledConditionsNotMet(enabledConditionsNotMetList);
-
-          // We consider this an enable issue, not a not vectorized issue.
-          LOG.info("Cannot enable vectorization because input file format(s) " + inputFileFormatClassNameSet +
-              " do not met conditions " + VectorizationCondition.addBooleans(enabledConditionsNotMetList, false));
-          return new ImmutablePair<Boolean,Boolean>(false, true);
+        if (!verifyAndSetVectorPartDesc(partDesc, isAcidTable)) {
+          return false;
         }
         VectorPartitionDesc vectorPartDesc = partDesc.getVectorPartitionDesc();
+        if (LOG.isInfoEnabled()) {
+          LOG.info("Vectorizer path: " + path + ", " + vectorPartDesc.toString() +
+              ", aliases " + aliases);
+        }
 
         if (isFirst) {
 
@@ -950,13 +796,13 @@ public class Vectorizer implements PhysicalPlanResolver {
          * implicitly defaulted to null.
          */
         if (nextDataColumnList.size() > tableDataColumnList.size()) {
-          setOperatorIssue(
+          LOG.info(
               String.format(
                   "Could not vectorize partition %s " +
                   "(deserializer " + deserializer.getClass().getName() + ")" +
                   "The partition column names %d is greater than the number of table columns %d",
                   path, nextDataColumnList.size(), tableDataColumnList.size()));
-          return new ImmutablePair<Boolean,Boolean>(false, false);
+          return false;
         }
         if (!(deserializer instanceof NullStructSerDe)) {
 
@@ -965,13 +811,13 @@ public class Vectorizer implements PhysicalPlanResolver {
             String nextColumnName = nextDataColumnList.get(i);
             String tableColumnName = tableDataColumnList.get(i);
             if (!nextColumnName.equals(tableColumnName)) {
-              setOperatorIssue(
+              LOG.info(
                   String.format(
                       "Could not vectorize partition %s " +
                       "(deserializer " + deserializer.getClass().getName() + ")" +
                       "The partition column name %s is does not match table column name %s",
                       path, nextColumnName, tableColumnName));
-              return new ImmutablePair<Boolean,Boolean>(false, false);
+              return false;
             }
           }
         }
@@ -1006,50 +852,29 @@ public class Vectorizer implements PhysicalPlanResolver {
       // Helps to keep this for debugging.
       vectorTaskColumnInfo.setTableScanOperator(tableScanOperator);
 
-      // Always set these so EXPLAIN can see.
-      mapWork.setVectorizationInputFileFormatClassNameSet(inputFileFormatClassNameSet);
-      mapWork.setVectorizationEnabledConditionsMet(new ArrayList(enabledConditionsMetSet));
-      mapWork.setVectorizationEnabledConditionsNotMet(enabledConditionsNotMetList);
-
-      return new ImmutablePair<Boolean,Boolean>(true, false);
+      return true;
     }
 
-    private boolean validateMapWork(MapWork mapWork, VectorTaskColumnInfo vectorTaskColumnInfo, boolean isTezOrSpark)
+    private boolean validateMapWork(MapWork mapWork, VectorTaskColumnInfo vectorTaskColumnInfo, boolean isTez)
             throws SemanticException {
 
       LOG.info("Validating MapWork...");
 
-      ImmutablePair<String,TableScanOperator> onlyOneTableScanPair = verifyOnlyOneTableScanOperator(mapWork);
-      if (onlyOneTableScanPair ==  null) {
-        VectorizerReason notVectorizedReason = currentBaseWork.getNotVectorizedReason();
-        Preconditions.checkState(notVectorizedReason != null);
-        mapWork.setVectorizationEnabledConditionsNotMet(Arrays.asList(new String[] {notVectorizedReason.toString()}));
+      ImmutablePair<String,TableScanOperator> pair = verifyOnlyOneTableScanOperator(mapWork);
+      if (pair ==  null) {
         return false;
       }
-      String alias = onlyOneTableScanPair.left;
-      TableScanOperator tableScanOperator = onlyOneTableScanPair.right;
+      String alias = pair.left;
+      TableScanOperator tableScanOperator = pair.right;
 
       // This call fills in the column names, types, and partition column count in
       // vectorTaskColumnInfo.
-      currentOperator = tableScanOperator;
-      ImmutablePair<Boolean, Boolean> validateInputFormatAndSchemaEvolutionPair =
-          validateInputFormatAndSchemaEvolution(mapWork, alias, tableScanOperator, vectorTaskColumnInfo);
-      if (!validateInputFormatAndSchemaEvolutionPair.left) {
-        // Have we already set the enabled conditions not met?
-        if (!validateInputFormatAndSchemaEvolutionPair.right) {
-          VectorizerReason notVectorizedReason = currentBaseWork.getNotVectorizedReason();
-          Preconditions.checkState(notVectorizedReason != null);
-          mapWork.setVectorizationEnabledConditionsNotMet(Arrays.asList(new String[] {notVectorizedReason.toString()}));
-        }
+      if (!validateInputFormatAndSchemaEvolution(mapWork, alias, tableScanOperator, vectorTaskColumnInfo)) {
         return false;
       }
 
-      // Now we are enabled and any issues found from here on out are considered
-      // not vectorized issues.
-      mapWork.setVectorizationEnabled(true);
-
       Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
-      MapWorkValidationNodeProcessor vnp = new MapWorkValidationNodeProcessor(mapWork, isTezOrSpark);
+      MapWorkValidationNodeProcessor vnp = new MapWorkValidationNodeProcessor(mapWork, isTez);
       addMapWorkRules(opRules, vnp);
       Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
       GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -1071,13 +896,13 @@ public class Vectorizer implements PhysicalPlanResolver {
     }
 
     private void vectorizeMapWork(MapWork mapWork, VectorTaskColumnInfo vectorTaskColumnInfo,
-            boolean isTezOrSpark) throws SemanticException {
+            boolean isTez) throws SemanticException {
 
       LOG.info("Vectorizing MapWork...");
       mapWork.setVectorMode(true);
       Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
       MapWorkVectorizationNodeProcessor vnp =
-          new MapWorkVectorizationNodeProcessor(mapWork, isTezOrSpark, vectorTaskColumnInfo);
+          new MapWorkVectorizationNodeProcessor(mapWork, isTez, vectorTaskColumnInfo);
       addMapWorkRules(opRules, vnp);
       Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
       GraphWalker ogw = new PreOrderOnceWalker(disp);
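
Both validateMapWork and vectorizeMapWork above drive the same operator DAG through Hive's rule dispatcher and graph walkers, first with a validation processor and then, only on success, with a vectorizing one. A stripped-down sketch of that two-pass shape, with simplified stand-in types rather than Hive's real NodeProcessor/Dispatcher/GraphWalker API:

    import java.util.List;
    import java.util.function.Predicate;

    // Two-pass sketch: walk once to validate every operator, walk again to rewrite.
    public class TwoPassSketch {
      interface Op {
        List<Op> children();
      }

      static boolean walk(Op op, Predicate<Op> processor) {
        if (!processor.test(op)) {
          return false;          // one failing operator aborts the pass
        }
        for (Op child : op.children()) {
          if (!walk(child, processor)) {
            return false;
          }
        }
        return true;
      }

      static void convert(Op root, Predicate<Op> validate, Predicate<Op> vectorize) {
        if (walk(root, validate)) {   // pass 1: validation only, no mutation
          walk(root, vectorize);      // pass 2: replace operators with vector forms
        }
      }
    }
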
@@ -1098,34 +923,11 @@ public class Vectorizer implements PhysicalPlanResolver {
       return;
     }
 
-    private void setReduceWorkExplainConditions(ReduceWork reduceWork) {
-
-      reduceWork.setVectorizationExamined(true);
-
-      reduceWork.setReduceVectorizationEnabled(isReduceVectorizationEnabled);
-      reduceWork.setVectorReduceEngine(
-          HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE));
-    }
-
-    private void convertReduceWork(ReduceWork reduceWork) throws SemanticException {
-
-      // Global used when setting errors, etc.
-      currentBaseWork = reduceWork;
-      currentBaseWork.setVectorizationEnabled(true);
-
+    private void convertReduceWork(ReduceWork reduceWork, boolean isTez) throws SemanticException {
       VectorTaskColumnInfo vectorTaskColumnInfo = new VectorTaskColumnInfo();
-      vectorTaskColumnInfo.assume();
-
-      boolean ret = validateReduceWork(reduceWork, vectorTaskColumnInfo);
+      boolean ret = validateReduceWork(reduceWork, vectorTaskColumnInfo, isTez);
       if (ret) {
-        vectorizeReduceWork(reduceWork, vectorTaskColumnInfo);
-      } else if (currentBaseWork.getVectorizationEnabled()) {
-        VectorizerReason notVectorizedReason  = currentBaseWork.getNotVectorizedReason();
-        if (notVectorizedReason == null) {
-          LOG.info("Cannot vectorize: unknown");
-        } else {
-          LOG.info("Cannot vectorize: " + notVectorizedReason.toString());
-        }
+        vectorizeReduceWork(reduceWork, vectorTaskColumnInfo, isTez);
       }
     }
 
@@ -1139,14 +941,13 @@ public class Vectorizer implements PhysicalPlanResolver {
         // Check key ObjectInspector.
         ObjectInspector keyObjectInspector = reduceWork.getKeyObjectInspector();
         if (keyObjectInspector == null || !(keyObjectInspector instanceof StructObjectInspector)) {
-          setNodeIssue("Key object inspector missing or not StructObjectInspector");
           return false;
         }
         StructObjectInspector keyStructObjectInspector = (StructObjectInspector)keyObjectInspector;
         List<? extends StructField> keyFields = keyStructObjectInspector.getAllStructFieldRefs();
 
+        // Tez doesn't use tagging...
         if (reduceWork.getNeedsTagging()) {
-          setNodeIssue("Tez doesn't use tagging");
           return false;
         }
 
@@ -1154,7 +955,6 @@ public class Vectorizer implements PhysicalPlanResolver {
         ObjectInspector valueObjectInspector = reduceWork.getValueObjectInspector();
         if (valueObjectInspector == null ||
                 !(valueObjectInspector instanceof StructObjectInspector)) {
-          setNodeIssue("Value object inspector missing or not StructObjectInspector");
           return false;
         }
         StructObjectInspector valueStructObjectInspector = (StructObjectInspector)valueObjectInspector;
@@ -1184,7 +984,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     }
 
     private boolean validateReduceWork(ReduceWork reduceWork,
-        VectorTaskColumnInfo vectorTaskColumnInfo) throws SemanticException {
+        VectorTaskColumnInfo vectorTaskColumnInfo, boolean isTez) throws SemanticException {
 
       LOG.info("Validating ReduceWork...");
 
@@ -1215,7 +1015,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     }
 
     private void vectorizeReduceWork(ReduceWork reduceWork,
-        VectorTaskColumnInfo vectorTaskColumnInfo) throws SemanticException {
+        VectorTaskColumnInfo vectorTaskColumnInfo, boolean isTez) throws SemanticException {
 
       LOG.info("Vectorizing ReduceWork...");
       reduceWork.setVectorMode(true);
@@ -1225,7 +1025,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       // VectorizationContext...  Do we use PreOrderWalker instead of DefaultGraphWalker.
       Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
       ReduceWorkVectorizationNodeProcessor vnp =
-              new ReduceWorkVectorizationNodeProcessor(vectorTaskColumnInfo);
+              new ReduceWorkVectorizationNodeProcessor(vectorTaskColumnInfo, isTez);
       addReduceWorkRules(opRules, vnp);
       Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
       GraphWalker ogw = new PreOrderWalker(disp);
@@ -1253,7 +1053,7 @@ public class Vectorizer implements PhysicalPlanResolver {
   class MapWorkValidationNodeProcessor implements NodeProcessor {
 
     private final MapWork mapWork;
-    private final boolean isTezOrSpark;
+    private final boolean isTez;
 
    // Children of Vectorized GROUPBY that outputs rows instead of vectorized row batches.
     protected final Set<Operator<? extends OperatorDesc>> nonVectorizedOps =
@@ -1263,9 +1063,9 @@ public class Vectorizer implements PhysicalPlanResolver {
       return nonVectorizedOps;
     }
 
-    public MapWorkValidationNodeProcessor(MapWork mapWork, boolean isTezOrSpark) {
+    public MapWorkValidationNodeProcessor(MapWork mapWork, boolean isTez) {
       this.mapWork = mapWork;
-      this.isTezOrSpark = isTezOrSpark;
+      this.isTez = isTez;
     }
 
     @Override
@@ -1277,13 +1077,13 @@ public class Vectorizer implements PhysicalPlanResolver {
           return new Boolean(true);
         }
         boolean ret;
-        currentOperator = op;
         try {
-          ret = validateMapWorkOperator(op, mapWork, isTezOrSpark);
+          ret = validateMapWorkOperator(op, mapWork, isTez);
         } catch (Exception e) {
           throw new SemanticException(e);
         }
         if (!ret) {
+          LOG.info("MapWork Operator: " + op.getName() + " could not be vectorized.");
           return new Boolean(false);
         }
         // When Vectorized GROUPBY outputs rows instead of vectorized row batches, we don't
@@ -1319,9 +1119,9 @@ public class Vectorizer implements PhysicalPlanResolver {
         if (nonVectorizedOps.contains(op)) {
           return new Boolean(true);
         }
-        currentOperator = op;
         boolean ret = validateReduceWorkOperator(op);
         if (!ret) {
+          LOG.info("ReduceWork Operator: " + op.getName() + " could not be vectorized.");
           return new Boolean(false);
         }
         // When Vectorized GROUPBY outputs rows instead of vectorized row batches, we don't
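
The comment above (cut off by the hunk boundary) refers to the nonVectorizedOps set: when a vectorized GROUP BY emits plain rows rather than batches, every operator beneath it is recorded as non-vectorizable and skipped by the later passes. A minimal sketch of that subtree marking, assuming a simplified operator type; the real code keys on VectorGroupByDesc.isVectorOutput().

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Sketch: descendants of a row-emitting GROUP BY are marked non-vectorized.
    public class RowModeSubtreeSketch {
      static class Op {
        final List<Op> children = new ArrayList<>();
        boolean vectorOutput = true;   // false: emits rows, not batches
      }

      static void markDescendants(Op op, Set<Op> nonVectorizedOps) {
        for (Op child : op.children) {
          nonVectorizedOps.add(child);
          markDescendants(child, nonVectorizedOps);
        }
      }

      static Set<Op> collect(Op groupBy) {
        Set<Op> nonVectorizedOps = new HashSet<>();
        if (!groupBy.vectorOutput) {
          markDescendants(groupBy, nonVectorizedOps);
        }
        return nonVectorizedOps;
      }
    }
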
@@ -1342,12 +1142,9 @@ public class Vectorizer implements PhysicalPlanResolver {
     // The vectorization context for the Map or Reduce task.
     protected VectorizationContext taskVectorizationContext;
 
-    protected final VectorTaskColumnInfo vectorTaskColumnInfo;
     protected final Set<Operator<? extends OperatorDesc>> nonVectorizedOps;
 
-    VectorizationNodeProcessor(VectorTaskColumnInfo vectorTaskColumnInfo,
-        Set<Operator<? extends OperatorDesc>> nonVectorizedOps) {
-      this.vectorTaskColumnInfo = vectorTaskColumnInfo;
+    VectorizationNodeProcessor(Set<Operator<? extends OperatorDesc>> nonVectorizedOps) {
       this.nonVectorizedOps = nonVectorizedOps;
     }
 
@@ -1395,11 +1192,11 @@ public class Vectorizer implements PhysicalPlanResolver {
     }
 
     public Operator<? extends OperatorDesc> doVectorize(Operator<? extends OperatorDesc> op,
-            VectorizationContext vContext, boolean isTezOrSpark) throws SemanticException {
+            VectorizationContext vContext, boolean isTez) throws SemanticException {
       Operator<? extends OperatorDesc> vectorOp = op;
       try {
         if (!opsDone.contains(op)) {
-          vectorOp = vectorizeOperator(op, vContext, isTezOrSpark, vectorTaskColumnInfo);
+          vectorOp = vectorizeOperator(op, vContext, isTez);
           opsDone.add(op);
           if (vectorOp != op) {
             opToVectorOpMap.put(op, vectorOp);
@@ -1423,14 +1220,14 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     private final MapWork mWork;
     private final VectorTaskColumnInfo vectorTaskColumnInfo;
-    private final boolean isTezOrSpark;
+    private final boolean isTez;
 
-    public MapWorkVectorizationNodeProcessor(MapWork mWork, boolean isTezOrSpark,
+    public MapWorkVectorizationNodeProcessor(MapWork mWork, boolean isTez,
         VectorTaskColumnInfo vectorTaskColumnInfo) {
-      super(vectorTaskColumnInfo, vectorTaskColumnInfo.getNonVectorizedOps());
+      super(vectorTaskColumnInfo.getNonVectorizedOps());
       this.mWork = mWork;
       this.vectorTaskColumnInfo = vectorTaskColumnInfo;
-      this.isTezOrSpark = isTezOrSpark;
+      this.isTez = isTez;
     }
 
     @Override
@@ -1444,7 +1241,6 @@ public class Vectorizer implements PhysicalPlanResolver {
 
       VectorizationContext vContext = null;
 
-      currentOperator = op;
       if (op instanceof TableScanOperator) {
         if (taskVectorizationContext == null) {
           taskVectorizationContext = getVectorizationContext(op.getName(), vectorTaskColumnInfo);
@@ -1465,7 +1261,7 @@ public class Vectorizer implements PhysicalPlanResolver {
             + " using vectorization context" + vContext.toString());
       }
 
-      Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext, isTezOrSpark);
+      Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext, isTez);
 
       if (LOG.isDebugEnabled()) {
         if (vectorOp instanceof VectorizationContextRegion) {
@@ -1483,6 +1279,7 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     private final VectorTaskColumnInfo vectorTaskColumnInfo;
 
+    private final boolean isTez;
 
     private Operator<? extends OperatorDesc> rootVectorOp;
 
@@ -1490,11 +1287,13 @@ public class Vectorizer implements PhysicalPlanResolver {
       return rootVectorOp;
     }
 
-    public ReduceWorkVectorizationNodeProcessor(VectorTaskColumnInfo vectorTaskColumnInfo) {
+    public ReduceWorkVectorizationNodeProcessor(VectorTaskColumnInfo vectorTaskColumnInfo,
+            boolean isTez) {
 
-      super(vectorTaskColumnInfo, vectorTaskColumnInfo.getNonVectorizedOps());
+      super(vectorTaskColumnInfo.getNonVectorizedOps());
       this.vectorTaskColumnInfo =  vectorTaskColumnInfo;
       rootVectorOp = null;
+      this.isTez = isTez;
     }
 
     @Override
@@ -1510,7 +1309,6 @@ public class Vectorizer implements PhysicalPlanResolver {
 
       boolean saveRootVectorOp = false;
 
-      currentOperator = op;
       if (op.getParentOperators().size() == 0) {
         LOG.info("ReduceWorkVectorizationNodeProcessor process reduceColumnNames " + vectorTaskColumnInfo.allColumnNames.toString());
 
@@ -1535,7 +1333,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       assert vContext != null;
       LOG.info("ReduceWorkVectorizationNodeProcessor process operator " + op.getName() + " using vectorization context" + vContext.toString());
 
-      Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext, true);
+      Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext, isTez);
 
       if (LOG.isDebugEnabled()) {
         if (vectorOp instanceof VectorizationContextRegion) {
@@ -1592,10 +1390,6 @@ public class Vectorizer implements PhysicalPlanResolver {
         HiveConf.getBoolVar(hiveConf,
             HiveConf.ConfVars.HIVE_VECTORIZATION_USE_ROW_DESERIALIZE);
 
-    isReduceVectorizationEnabled =
-        HiveConf.getBoolVar(hiveConf,
-            HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED);
-
     isSchemaEvolution =
         HiveConf.getBoolVar(hiveConf,
             HiveConf.ConfVars.HIVE_SCHEMA_EVOLUTION);
@@ -1613,32 +1407,18 @@ public class Vectorizer implements PhysicalPlanResolver {
     return physicalContext;
   }
 
-  private void setOperatorNotSupported(Operator<? extends OperatorDesc> op) {
-    OperatorDesc desc = op.getConf();
-    Annotation note = AnnotationUtils.getAnnotation(desc.getClass(), Explain.class);
-    if (note != null) {
-      Explain explainNote = (Explain) note;
-      setNodeIssue(explainNote.displayName() + " (" + op.getType() + ") not supported");
-    } else {
-      setNodeIssue("Operator " + op.getType() + " not supported");
-    }
-  }
-
-  boolean validateMapWorkOperator(Operator<? extends OperatorDesc> op, MapWork mWork, boolean isTezOrSpark) {
-    boolean ret;
+  boolean validateMapWorkOperator(Operator<? extends OperatorDesc> op, MapWork mWork, boolean isTez) {
+    boolean ret = false;
     switch (op.getType()) {
       case MAPJOIN:
         if (op instanceof MapJoinOperator) {
           ret = validateMapJoinOperator((MapJoinOperator) op);
         } else if (op instanceof SMBMapJoinOperator) {
           ret = validateSMBMapJoinOperator((SMBMapJoinOperator) op);
-        } else {
-          setOperatorNotSupported(op);
-          ret = false;
         }
         break;
       case GROUPBY:
-        ret = validateGroupByOperator((GroupByOperator) op, false, isTezOrSpark);
+        ret = validateGroupByOperator((GroupByOperator) op, false, isTez);
         break;
       case FILTER:
         ret = validateFilterOperator((FilterOperator) op);
@@ -1663,7 +1443,6 @@ public class Vectorizer implements PhysicalPlanResolver {
             validateSparkHashTableSinkOperator((SparkHashTableSinkOperator) op);
         break;
       default:
-        setOperatorNotSupported(op);
         ret = false;
         break;
     }
@@ -1671,7 +1450,7 @@ public class Vectorizer implements PhysicalPlanResolver {
   }
 
   boolean validateReduceWorkOperator(Operator<? extends OperatorDesc> op) {
-    boolean ret;
+    boolean ret = false;
     switch (op.getType()) {
       case MAPJOIN:
         // Does MAPJOIN actually get planned in Reduce?
@@ -1679,9 +1458,6 @@ public class Vectorizer implements PhysicalPlanResolver {
           ret = validateMapJoinOperator((MapJoinOperator) op);
         } else if (op instanceof SMBMapJoinOperator) {
           ret = validateSMBMapJoinOperator((SMBMapJoinOperator) op);
-        } else {
-          setOperatorNotSupported(op);
-          ret = false;
         }
         break;
       case GROUPBY:
@@ -1689,7 +1465,6 @@ public class Vectorizer implements PhysicalPlanResolver {
                     HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED)) {
           ret = validateGroupByOperator((GroupByOperator) op, true, true);
         } else {
-          setNodeIssue("Operator " + op.getType() + " not enabled (" + HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED.name() + "=true IS false)");
           ret = false;
         }
         break;
@@ -1715,7 +1490,6 @@ public class Vectorizer implements PhysicalPlanResolver {
             validateSparkHashTableSinkOperator((SparkHashTableSinkOperator) op);
         break;
       default:
-        setOperatorNotSupported(op);
         ret = false;
         break;
     }
@@ -1738,7 +1512,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       throws SemanticException {
     if (op.getType().equals(OperatorType.GROUPBY)) {
       GroupByDesc desc = (GroupByDesc) op.getConf();
-      return !((VectorGroupByDesc) desc.getVectorDesc()).isVectorOutput();
+      return !desc.getVectorDesc().isVectorOutput();
     }
     return false;
   }
@@ -1752,7 +1526,6 @@ public class Vectorizer implements PhysicalPlanResolver {
   private boolean validateTableScanOperator(TableScanOperator op, MapWork mWork) {
     TableScanDesc desc = op.getConf();
     if (desc.isGatherStats()) {
-      setOperatorIssue("gather stats not supported");
       return false;
     }
 
@@ -1767,21 +1540,25 @@ public class Vectorizer implements PhysicalPlanResolver {
   private boolean validateMapJoinDesc(MapJoinDesc desc) {
     byte posBigTable = (byte) desc.getPosBigTable();
     List<ExprNodeDesc> filterExprs = desc.getFilters().get(posBigTable);
-    if (!validateExprNodeDesc(filterExprs, "Filter", VectorExpressionDescriptor.Mode.FILTER)) {
+    if (!validateExprNodeDesc(filterExprs, VectorExpressionDescriptor.Mode.FILTER)) {
+      LOG.info("Cannot vectorize map work filter expression");
       return false;
     }
     List<ExprNodeDesc> keyExprs = desc.getKeys().get(posBigTable);
-    if (!validateExprNodeDesc(keyExprs, "Key")) {
+    if (!validateExprNodeDesc(keyExprs)) {
+      LOG.info("Cannot vectorize map work key expression");
       return false;
     }
     List<ExprNodeDesc> valueExprs = desc.getExprs().get(posBigTable);
-    if (!validateExprNodeDesc(valueExprs, "Value")) {
+    if (!validateExprNodeDesc(valueExprs)) {
+      LOG.info("Cannot vectorize map work value expression");
       return false;
     }
     Byte[] order = desc.getTagOrder();
     Byte posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? order[1] : order[0]);
     List<ExprNodeDesc> smallTableExprs = desc.getExprs().get(posSingleVectorMapJoinSmallTable);
-    if (!validateExprNodeDesc(smallTableExprs, "Small Table")) {
+    if (!validateExprNodeDesc(smallTableExprs)) {
+      LOG.info("Cannot vectorize map work small table expression");
       return false;
     }
     return true;
@@ -1794,23 +1571,24 @@ public class Vectorizer implements PhysicalPlanResolver {
     List<ExprNodeDesc> filterExprs = desc.getFilters().get(tag);
     List<ExprNodeDesc> keyExprs = desc.getKeys().get(tag);
     List<ExprNodeDesc> valueExprs = desc.getExprs().get(tag);
-    return validateExprNodeDesc(filterExprs, "Filter", VectorExpressionDescriptor.Mode.FILTER) &&
-        validateExprNodeDesc(keyExprs, "Key") && validateExprNodeDesc(valueExprs, "Value");
+    return validateExprNodeDesc(filterExprs, VectorExpressionDescriptor.Mode.FILTER) &&
+        validateExprNodeDesc(keyExprs) && validateExprNodeDesc(valueExprs);
   }
 
   private boolean validateReduceSinkOperator(ReduceSinkOperator op) {
     List<ExprNodeDesc> keyDescs = op.getConf().getKeyCols();
     List<ExprNodeDesc> partitionDescs = op.getConf().getPartitionCols();
     List<ExprNodeDesc> valueDesc = op.getConf().getValueCols();
-    return validateExprNodeDesc(keyDescs, "Key") && validateExprNodeDesc(partitionDescs, "Partition") &&
-        validateExprNodeDesc(valueDesc, "Value");
+    return validateExprNodeDesc(keyDescs) && validateExprNodeDesc(partitionDescs) &&
+        validateExprNodeDesc(valueDesc);
   }
 
   private boolean validateSelectOperator(SelectOperator op) {
     List<ExprNodeDesc> descList = op.getConf().getColList();
     for (ExprNodeDesc desc : descList) {
-      boolean ret = validateExprNodeDesc(desc, "Select");
+      boolean ret = validateExprNodeDesc(desc);
       if (!ret) {
+        LOG.info("Cannot vectorize select expression: " + desc.toString());
         return false;
       }
     }
@@ -1819,26 +1597,28 @@ public class Vectorizer implements PhysicalPlanResolver {
 
   private boolean validateFilterOperator(FilterOperator op) {
     ExprNodeDesc desc = op.getConf().getPredicate();
-    return validateExprNodeDesc(desc, "Predicate", VectorExpressionDescriptor.Mode.FILTER);
+    return validateExprNodeDesc(desc, VectorExpressionDescriptor.Mode.FILTER);
   }
 
-  private boolean validateGroupByOperator(GroupByOperator op, boolean isReduce, boolean isTezOrSpark) {
+  private boolean validateGroupByOperator(GroupByOperator op, boolean isReduce, boolean isTez) {
     GroupByDesc desc = op.getConf();
+    VectorGroupByDesc vectorDesc = desc.getVectorDesc();
 
     if (desc.isGroupingSetsPresent()) {
-      setOperatorIssue("Grouping sets not supported");
+      LOG.info("Grouping sets not supported in vector mode");
       return false;
     }
     if (desc.pruneGroupingSetId()) {
-      setOperatorIssue("Pruning grouping set id not supported");
+      LOG.info("Pruning grouping set id not supported in vector mode");
       return false;
     }
     if (desc.getMode() != GroupByDesc.Mode.HASH && desc.isDistinct()) {
-      setOperatorIssue("DISTINCT not supported");
+      LOG.info("DISTINCT not supported in vector mode");
       return false;
     }
-    boolean ret = validateExprNodeDesc(desc.getKeys(), "Key");
+    boolean ret = validateExprNodeDesc(desc.getKeys());
     if (!ret) {
+      LOG.info("Cannot vectorize groupby key expression " + desc.getKeys().toString());
       return false;
     }
 
@@ -1951,9 +1731,6 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     // If all the aggregation outputs are primitive, we can output VectorizedRowBatch.
    // Otherwise, the rest of the operator tree will be row mode.
-    VectorGroupByDesc vectorDesc = new VectorGroupByDesc();
-    desc.setVectorDesc(vectorDesc);
-
     vectorDesc.setVectorOutput(retPair.right);
 
     vectorDesc.setProcessingMode(processingMode);
@@ -1968,15 +1745,14 @@ public class Vectorizer implements PhysicalPlanResolver {
    return true;
   }
 
-  private boolean validateExprNodeDesc(List<ExprNodeDesc> descs, String expressionTitle) {
-    return validateExprNodeDesc(descs, expressionTitle, VectorExpressionDescriptor.Mode.PROJECTION);
+  private boolean validateExprNodeDesc(List<ExprNodeDesc> descs) {
+    return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION);
   }
 
   private boolean validateExprNodeDesc(List<ExprNodeDesc> descs,
-          String expressionTitle,
           VectorExpressionDescriptor.Mode mode) {
     for (ExprNodeDesc d : descs) {
-      boolean ret = validateExprNodeDesc(d, expressionTitle, mode);
+      boolean ret = validateExprNodeDesc(d, mode);
       if (!ret) {
         return false;
       }
@@ -1999,20 +1775,19 @@ public class Vectorizer implements PhysicalPlanResolver {
     return new Pair<Boolean, Boolean>(true, outputIsPrimitive);
   }
 
-  private boolean validateExprNodeDescRecursive(ExprNodeDesc desc, String expressionTitle,
-      VectorExpressionDescriptor.Mode mode) {
+  private boolean validateExprNodeDescRecursive(ExprNodeDesc desc, VectorExpressionDescriptor.Mode mode) {
     if (desc instanceof ExprNodeColumnDesc) {
       ExprNodeColumnDesc c = (ExprNodeColumnDesc) desc;
       // Currently, we do not support vectorized virtual columns (see HIVE-5570).
       if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(c.getColumn())) {
-        setExpressionIssue(expressionTitle, "Virtual columns not supported (" + c.getColumn() + ")");
+        LOG.info("Cannot vectorize virtual column " + c.getColumn());
         return false;
       }
     }
     String typeName = desc.getTypeInfo().getTypeName();
     boolean ret = validateDataType(typeName, mode);
     if (!ret) {
-      setExpressionIssue(expressionTitle, "Data type " + typeName + " of " + desc.toString() + " not supported");
+      LOG.info("Cannot vectorize " + desc.toString() + " of type " + typeName);
       return false;
     }
     boolean isInExpression = false;
@@ -2020,7 +1795,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       ExprNodeGenericFuncDesc d = (ExprNodeGenericFuncDesc) desc;
       boolean r = validateGenericUdf(d);
       if (!r) {
-        setExpressionIssue(expressionTitle, "UDF " + d + " not supported");
+        LOG.info("Cannot vectorize UDF " + d);
         return false;
       }
       GenericUDF genericUDF = d.getGenericUDF();
@@ -2031,14 +1806,14 @@ public class Vectorizer implements PhysicalPlanResolver {
           && desc.getChildren().get(0).getTypeInfo().getCategory() == Category.STRUCT) {
         // Don't restrict child expressions for projection.
         // Always use loose FILTER mode.
-        if (!validateStructInExpression(desc, expressionTitle, VectorExpressionDescriptor.Mode.FILTER)) {
+        if (!validateStructInExpression(desc, VectorExpressionDescriptor.Mode.FILTER)) {
           return false;
         }
       } else {
         for (ExprNodeDesc d : desc.getChildren()) {
           // Don't restrict child expressions for projection.
           // Always use loose FILTER mode.
-          if (!validateExprNodeDescRecursive(d, expressionTitle, VectorExpressionDescriptor.Mode.FILTER)) {
+          if (!validateExprNodeDescRecursive(d, VectorExpressionDescriptor.Mode.FILTER)) {
             return false;
           }
         }
@@ -2048,7 +1823,7 @@ public class Vectorizer implements PhysicalPlanResolver {
   }
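
For illustration only (not part of the patch): a minimal, self-contained sketch of the recursion above, using stand-in types (Expr, Mode) rather than the real ExprNodeDesc/VectorExpressionDescriptor API, to show why child expressions are always validated in the looser FILTER mode while the requested mode applies only to the root.

    import java.util.Arrays;
    import java.util.List;

    class ExprValidationSketch {
        enum Mode { PROJECTION, FILTER }

        // Stand-in for ExprNodeDesc: a type name plus child expressions.
        static class Expr {
            final String typeName;
            final List<Expr> children;
            Expr(String typeName, Expr... children) {
                this.typeName = typeName;
                this.children = Arrays.asList(children);
            }
        }

        // Mirrors the shape of validateExprNodeDescRecursive: children are
        // never restricted by the caller's mode ("loose" FILTER mode).
        static boolean validate(Expr e, Mode mode) {
            if (!dataTypeSupported(e.typeName, mode)) {
                return false;
            }
            for (Expr child : e.children) {
                if (!validate(child, Mode.FILTER)) { // don't restrict children
                    return false;
                }
            }
            return true;
        }

        // Stand-in for validateDataType; the real check consults
        // VectorExpressionDescriptor and the supported-type lists.
        static boolean dataTypeSupported(String typeName, Mode mode) {
            return !typeName.equals("map") && !typeName.equals("union");
        }

        public static void main(String[] args) {
            Expr col = new Expr("int");
            Expr filter = new Expr("boolean", col);
            System.out.println(validate(filter, Mode.PROJECTION)); // true
        }
    }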
 
   private boolean validateStructInExpression(ExprNodeDesc desc,
-      String expressionTitle, VectorExpressionDescriptor.Mode mode) {
+      VectorExpressionDescriptor.Mode mode) {
     for (ExprNodeDesc d : desc.getChildren()) {
       TypeInfo typeInfo = d.getTypeInfo();
       if (typeInfo.getCategory() != Category.STRUCT) {
@@ -2064,8 +1839,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         TypeInfo fieldTypeInfo = fieldTypeInfos.get(f);
         Category category = fieldTypeInfo.getCategory();
         if (category != Category.PRIMITIVE) {
-          setExpressionIssue(expressionTitle,
-              "Cannot vectorize struct field " + fieldNames.get(f)
+          LOG.info("Cannot vectorize struct field " + fieldNames.get(f)
               + " of type " + fieldTypeInfo.getTypeName());
           return false;
         }
@@ -2078,8 +1852,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         if (inConstantType != InConstantType.INT_FAMILY
             && inConstantType != InConstantType.FLOAT_FAMILY
             && inConstantType != InConstantType.STRING_FAMILY) {
-          setExpressionIssue(expressionTitle,
-              "Cannot vectorize struct field " + fieldNames.get(f)
+          LOG.info("Cannot vectorize struct field " + fieldNames.get(f)
               + " of type " + fieldTypeInfo.getTypeName());
           return false;
         }
@@ -2088,28 +1861,31 @@ public class Vectorizer implements PhysicalPlanResolver {
     return true;
   }
 
-  private boolean validateExprNodeDesc(ExprNodeDesc desc, String expressionTitle) {
-    return validateExprNodeDesc(desc, expressionTitle, VectorExpressionDescriptor.Mode.PROJECTION);
+  private boolean validateExprNodeDesc(ExprNodeDesc desc) {
+    return validateExprNodeDesc(desc, VectorExpressionDescriptor.Mode.PROJECTION);
   }
 
-  boolean validateExprNodeDesc(ExprNodeDesc desc, String expressionTitle,
-      VectorExpressionDescriptor.Mode mode) {
-    if (!validateExprNodeDescRecursive(desc, expressionTitle, mode)) {
+  boolean validateExprNodeDesc(ExprNodeDesc desc, VectorExpressionDescriptor.Mode mode) {
+    if (!validateExprNodeDescRecursive(desc, mode)) {
       return false;
     }
     try {
       VectorizationContext vc = new ValidatorVectorizationContext(hiveConf);
       if (vc.getVectorExpression(desc, mode) == null) {
         // TODO: this cannot happen - VectorizationContext throws in such cases.
-        setExpressionIssue(expressionTitle, "getVectorExpression returned null");
+        LOG.info("getVectorExpression returned null");
         return false;
       }
     } catch (Exception e) {
       if (e instanceof HiveException) {
-        setExpressionIssue(expressionTitle, e.getMessage());
+        LOG.info(e.getMessage());
       } else {
-        String issue = "exception: " + VectorizationContext.getStackTraceAsSingleLine(e);
-        setExpressionIssue(expressionTitle, issue);
+        if (LOG.isDebugEnabled()) {
+          // Show stack trace.
+          LOG.debug("Failed to vectorize", e);
+        } else {
+          LOG.info("Failed to vectorize: " + e.getMessage());
+        }
       }
       return false;
     }
@@ -2129,9 +1905,9 @@ public class Vectorizer implements PhysicalPlanResolver {
     }
   }
 
-  public static ObjectInspector.Category aggregationOutputCategory(VectorAggregateExpression vectorAggrExpr) {
+  private boolean validateAggregationIsPrimitive(VectorAggregateExpression vectorAggrExpr) {
     ObjectInspector outputObjInspector = vectorAggrExpr.getOutputObjectInspector();
-    return outputObjInspector.getCategory();
+    return (outputObjInspector.getCategory() == ObjectInspector.Category.PRIMITIVE);
   }
 
   private Pair<Boolean,Boolean> validateAggregationDesc(AggregationDesc aggDesc, ProcessingMode processingMode,
@@ -2139,10 +1915,11 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     String udfName = aggDesc.getGenericUDAFName().toLowerCase();
     if (!supportedAggregationUdfs.contains(udfName)) {
-      setExpressionIssue("Aggregation Function", "UDF " + udfName + " not supported");
+      LOG.info("Cannot vectorize groupby aggregate expression: UDF " + udfName + " not supported");
       return new Pair<Boolean,Boolean>(false, false);
     }
-    if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters(), "Aggregation Function UDF " + udfName + " parameter")) {
+    if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters())) {
+      LOG.info("Cannot vectorize groupby aggregate expression: UDF parameters not supported");
       return new Pair<Boolean,Boolean>(false, false);
     }
 
@@ -2156,7 +1933,6 @@ public class Vectorizer implements PhysicalPlanResolver {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Vectorization of aggreation should have succeeded ", e);
       }
-      setExpressionIssue("Aggregation Function", "Vectorization of aggregation should have succeeded " + e);
       return new Pair<Boolean,Boolean>(false, false);
     }
     if (LOG.isDebugEnabled()) {
@@ -2164,12 +1940,11 @@ public class Vectorizer implements PhysicalPlanResolver {
           " vector expression " + vectorAggrExpr.toString());
     }
 
-    ObjectInspector.Category outputCategory = aggregationOutputCategory(vectorAggrExpr);
-    boolean outputIsPrimitive = (outputCategory == ObjectInspector.Category.PRIMITIVE);
+    boolean outputIsPrimitive = validateAggregationIsPrimitive(vectorAggrExpr);
     if (processingMode == ProcessingMode.MERGE_PARTIAL &&
         hasKeys &&
         !outputIsPrimitive) {
-      setOperatorIssue("Vectorized Reduce MergePartial GROUP BY keys can only handle aggregate outputs that are primitive types");
+      LOG.info("Vectorized Reduce MergePartial GROUP BY keys can only handle aggregate outputs that are primitive types");
       return new Pair<Boolean,Boolean>(false, false);
     }
 
@@ -2237,12 +2012,12 @@ public class Vectorizer implements PhysicalPlanResolver {
         if (smallTableIndices[i] < 0) {
           // Negative numbers indicate a column to be (deserialize) read from the small table's
           // LazyBinary value row.
-          setOperatorIssue("Vectorizer isBigTableOnlyResults smallTableIndices[i] < 0 returning false");
+          LOG.info("Vectorizer isBigTableOnlyResults smallTableIndices[i] < 0 returning false");
           return false;
         }
       }
     } else if (smallTableRetainSize > 0) {
-      setOperatorIssue("Vectorizer isBigTableOnlyResults smallTableRetainSize > 0 returning false");
+      LOG.info("Vectorizer isBigTableOnlyResults smallTableRetainSize > 0 returning false");
       return false;
     }
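
For illustration only (not part of the patch): the negative-index convention checked above also appears in the small-table result code later in this diff. Entries >= 0 select a big-table key column, while a negative entry encodes small-table value column v as -v - 1 (decoded below exactly as the patch does with -smallTableIndices[i] - 1).

    class SmallTableIndexSketch {
        // Inverse of the encoding: value column v is stored as -v - 1,
        // so -1 -> 0, -2 -> 1, -3 -> 2, ...
        static int decodeValueIndex(int encoded) {
            return -encoded - 1;
        }

        public static void main(String[] args) {
            int[] smallTableIndices = { 2, -1, -3 };
            for (int i : smallTableIndices) {
                if (i >= 0) {
                    System.out.println("big-table key column " + i);
                } else {
                    System.out.println("small-table value column " + decodeValueIndex(i));
                }
            }
        }
    }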
 
@@ -2251,21 +2026,20 @@ public class Vectorizer implements PhysicalPlanResolver {
   }
 
   Operator<? extends OperatorDesc> specializeMapJoinOperator(Operator<? extends OperatorDesc> op,
-        VectorizationContext vContext, MapJoinDesc desc, VectorMapJoinInfo vectorMapJoinInfo)
-            throws HiveException {
+        VectorizationContext vContext, MapJoinDesc desc) throws HiveException {
     Operator<? extends OperatorDesc> vectorOp = null;
     Class<? extends Operator<?>> opClass = null;
 
-    VectorMapJoinDesc vectorDesc = (VectorMapJoinDesc) desc.getVectorDesc();
-
-    HashTableImplementationType hashTableImplementationType = HashTableImplementationType.NONE;
-    HashTableKind hashTableKind = HashTableKind.NONE;
-    HashTableKeyType hashTableKeyType = HashTableKeyType.NONE;
-    OperatorVariation operatorVariation = OperatorVariation.NONE;
+    VectorMapJoinDesc.HashTableImplementationType hashTableImplementationType = HashTableImplementationType.NONE;
+    VectorMapJoinDesc.HashTableKind hashTableKind = HashTableKind.NONE;
+    VectorMapJoinDesc.HashTableKeyType hashTableKeyType = HashTableKeyType.NONE;
 
-    if (vectorDesc.getIsFastHashTableEnabled()) {
+    if (HiveConf.getBoolVar(hiveConf,
+              HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) {
       hashTableImplementationType = HashTableImplementationType.FAST;
     } else {
+      // Restrict to using BytesBytesMultiHashMap via MapJoinBytesTableContainer or
+      // HybridHashTableContainer.
       hashTableImplementationType = HashTableImplementationType.OPTIMIZED;
     }
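
For illustration only (not part of the patch): the same configuration-driven choice condensed into one method, assuming only that a HiveConf instance and the Hive classes are on hand. With the fast-hashtable flag off, the operator falls back to the OPTIMIZED path described in the comment above.

    import org.apache.hadoop.hive.conf.HiveConf;

    class HashTableChoiceSketch {
        // Illustrative stand-in for VectorMapJoinDesc.HashTableImplementationType.
        enum HashTableImplementationType { NONE, FAST, OPTIMIZED }

        static HashTableImplementationType choose(HiveConf hiveConf) {
            boolean fastEnabled = HiveConf.getBoolVar(hiveConf,
                HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED);
            // FAST is the native fast hash table; OPTIMIZED restricts to
            // BytesBytesMultiHashMap via MapJoinBytesTableContainer or
            // HybridHashTableContainer (per the comment in the patch).
            return fastEnabled ? HashTableImplementationType.FAST
                               : HashTableImplementationType.OPTIMIZED;
        }
    }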
 
@@ -2287,31 +2061,20 @@ public class Vectorizer implements PhysicalPlanResolver {
       Map<Byte, List<ExprNodeDesc>> keyExprs = desc.getKeys();
       List<ExprNodeDesc> bigTableKeyExprs = keyExprs.get(posBigTable);
       if (bigTableKeyExprs.size() == 1) {
-        TypeInfo typeInfo = bigTableKeyExprs.get(0).getTypeInfo();
-        LOG.info("Vectorizer vectorizeOperator map join typeName " + typeInfo.getTypeName());
-        switch (((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory()) {
-        case BOOLEAN:
+        String typeName = bigTableKeyExprs.get(0).getTypeString();
+        LOG.info("Vectorizer vectorizeOperator map join typeName " + typeName);
+        if (typeName.equals("boolean")) {
           hashTableKeyType = HashTableKeyType.BOOLEAN;
-          break;
-        case BYTE:
+        } else if (typeName.equals("tinyint")) {
           hashTableKeyType = HashTableKeyType.BYTE;
-          break;
-        case SHORT:
+        } else if (typeName.equals("smallint")) {
           hashTableKeyType = HashTableKeyType.SHORT;
-          break;
-        case INT:
+        } else if (typeName.equals("int")) {
           hashTableKeyType = HashTableKeyType.INT;
-          break;
-        case LONG:
+        } else if (typeName.equals("bigint") || typeName.equals("long")) {
           hashTableKeyType = HashTableKeyType.LONG;
-          break;
-        case STRING:
-        case CHAR:
-        case VARCHAR:
-        case BINARY:
+        } else if (VectorizationContext.isStringFamily(typeName)) {
           hashTableKeyType = HashTableKeyType.STRING;
-        default:
-          // Stay with multi-key.
         }
       }
     }
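
For illustration only (not part of the patch): the single-column key mapping above, condensed into one lookup. Per the removed code's default branch, any type not listed (decimal, date, timestamp, and so on) stays with the generic multi-key path. The enum is an illustrative stand-in for VectorMapJoinDesc.HashTableKeyType, and the string matching is simplified (the patch uses VectorizationContext.isStringFamily, which also handles parameterized names like char(10)).

    class KeyTypeMappingSketch {
        enum KeyType { BOOLEAN, BYTE, SHORT, INT, LONG, STRING, MULTI_KEY }

        static KeyType map(String typeName) {
            switch (typeName) {
                case "boolean":  return KeyType.BOOLEAN;
                case "tinyint":  return KeyType.BYTE;
                case "smallint": return KeyType.SHORT;
                case "int":      return KeyType.INT;
                case "bigint":
                case "long":     return KeyType.LONG;
                case "string":
                case "char":
                case "varchar":
                case "binary":   return KeyType.STRING;    // the string family
                default:         return KeyType.MULTI_KEY; // stay with multi-key
            }
        }
    }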
@@ -2319,20 +2082,16 @@ public class Vectorizer implements PhysicalPlanResolver {
     switch (joinType) {
     case JoinDesc.INNER_JOIN:
       if (!isInnerBigOnly) {
-        operatorVariation = OperatorVariation.INNER;
         hashTableKind = HashTableKind.HASH_MAP;
       } else {
-        operatorVariation = OperatorVariation.INNER_BIG_ONLY;
         hashTableKind = HashTableKind.HASH_MULTISET;
       }
       break;
     case JoinDesc.LEFT_OUTER_JOIN:
     case JoinDesc.RIGHT_OUTER_JOIN:
-      operatorVariation = OperatorVariation.OUTER;
       hashTableKind = HashTableKind.HASH_MAP;
       break;
     case JoinDesc.LEFT_SEMI_JOIN:
-      operatorVariation = OperatorVariation.LEFT_SEMI;
       hashTableKind = HashTableKind.HASH_SET;
       break;
     default:
@@ -2347,84 +2106,86 @@ public class Vectorizer implements PhysicalPlanResolver {
     case SHORT:
     case INT:
     case LONG:
-      switch (operatorVariation) {
-      case INNER:
-        opClass = VectorMapJoinInnerLongOperator.class;
+      switch (joinType) {
+      case JoinDesc.INNER_JOIN:
+        if (!isInnerBigOnly) {
+          opClass = VectorMapJoinInnerLongOperator.class;
+        } else {
+          opClass = VectorMapJoinInnerBigOnlyLongOperator.class;
+        }
         break;
-      case INNER_BIG_ONLY:
-        opClass = VectorMapJoinInnerBigOnlyLongOperator.class;
+      case JoinDesc.LEFT_OUTER_JOIN:
+      case JoinDesc.RIGHT_OUTER_JOIN:
+        opClass = VectorMapJoinOuterLongOperator.class;
         break;
-      case LEFT_SEMI:
+      case JoinDesc.LEFT_SEMI_JOIN:
         opClass = VectorMapJoinLeftSemiLongOperator.class;
         break;
-      case OUTER:
-        opClass = VectorMapJoinOuterLongOperator.class;
-        break;
       default:
-        throw new HiveException("Unknown operator variation " + operatorVariation);
+        throw new HiveException("Unknown join type " + joinType);
       }
       break;
     case STRING:
-      switch (operatorVariation) {
-      case INNER:
-        opClass = VectorMapJoinInnerStringOperator.class;
+      switch (joinType) {
+      case JoinDesc.INNER_JOIN:
+        if (!isInnerBigOnly) {
+          opClass = VectorMapJoinInnerStringOperator.class;
+        } else {
+          opClass = VectorMapJoinInnerBigOnlyStringOperator.class;
+        }
         break;
-      case INNER_BIG_ONLY:
-        opClass = VectorMapJoinInnerBigOnlyStringOperator.class;
+      case JoinDesc.LEFT_OUTER_JOIN:
+      case JoinDesc.RIGHT_OUTER_JOIN:
+        opClass = VectorMapJoinOuterStringOperator.class;
         break;
-      case LEFT_SEMI:
+      case JoinDesc.LEFT_SEMI_JOIN:
         opClass = VectorMapJoinLeftSemiStringOperator.class;
         break;
-      case OUTER:
-        opClass = VectorMapJoinOuterStringOperator.class;
-        break;
       default:
-        throw new HiveException("Unknown operator variation " + operatorVariation);
+        throw new HiveException("Unknown join type " + joinType);
       }
       break;
     case MULTI_KEY:
-      switch (operatorVariation) {
-      case INNER:
-        opClass = VectorMapJoinInnerMultiKeyOperator.class;
+      switch (joinType) {
+      case JoinDesc.INNER_JOIN:
+        if (!isInnerBigOnly) {
+          opClass = VectorMapJoinInnerMultiKeyOperator.class;
+        } else {
+          opClass = VectorMapJoinInnerBigOnlyMultiKeyOperator.class;
+        }
         break;
-      case INNER_BIG_ONLY:
-        opClass = VectorMapJoinInnerBigOnlyMultiKeyOperator.class;
+      case JoinDesc.LEFT_OUTER_JOIN:
+      case JoinDesc.RIGHT_OUTER_JOIN:
+        opClass = VectorMapJoinOuterMultiKeyOperator.class;
         break;
-      case LEFT_SEMI:
+      case JoinDesc.LEFT_SEMI_JOIN:
         opClass = VectorMapJoinLeftSemiMultiKeyOperator.class;
         break;
-      case OUTER:
-        opClass = VectorMapJoinOuterMultiKeyOperator.class;
-        break;
       default:
-        throw new HiveException("Unknown operator variation " + operatorVariation);
+        throw new HiveException("Unknown join type " + joinType);
       }
       break;
-    default:
-      throw new RuntimeException("Unexpected hash table key type " + hashTableKeyType.name());
     }
 
+    vectorOp = OperatorFactory.getVectorOperator(
+        opClass, op.getCompilationOpContext(), op.getConf(), vContext);
+    LOG.info("Vectorizer vectorizeOperator map join class " + vectorOp.getClass().getSimpleName());
+
     boolean minMaxEnabled = HiveConf.getBoolVar(hiveConf,
         HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED);
 
+    VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
     vectorDesc.setHashTableImplementationType(hashTableImplementationType);
     vectorDesc.setHashTableKind(hashTableKind);
     vectorDesc.setHashTableKeyType(hashTableKeyType);
-    vectorDesc.setOperatorVariation(operatorVariation);
     vectorDesc.setMinMaxEnabled(minMaxEnabled);
-    vectorDesc.setVectorMapJoinInfo(vectorMapJoinInfo);
-
-    vectorOp = OperatorFactory.getVectorOperator(
-        opClass, op.getCompilationOpContext(), op.getConf(), vContext);
-    LOG.info("Vectorizer vectorizeOperator map join class " + vectorOp.getClass().getSimpleName());
-
     return vectorOp;
   }
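
For illustration only (not part of the patch): the nested switches above implement a grid, key family x join variation -> specialized operator class, where all the integer key types (BOOLEAN through LONG) share the Long-keyed operators. The class names below are the ones referenced in the patch; composing the name from the two dimensions is only a way to show the naming scheme, not how the operators are actually resolved.

    class MapJoinOperatorGridSketch {
        // variation: "Inner", "InnerBigOnly", "LeftSemi", or "Outer"
        // keyFamily: "Long" (boolean/tinyint/smallint/int/bigint keys),
        //            "String", or "MultiKey"
        static String opClassSimpleName(String variation, String keyFamily) {
            return "VectorMapJoin" + variation + keyFamily + "Operator";
        }

        public static void main(String[] args) {
            // An inner big-only join on an int key uses the Long-keyed operator:
            System.out.println(opClassSimpleName("InnerBigOnly", "Long"));
            // -> VectorMapJoinInnerBigOnlyLongOperator
        }
    }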
 
-  public static boolean onExpressionHasNullSafes(MapJoinDesc desc) {
+  private boolean onExpressionHasNullSafes(MapJoinDesc desc) {
     boolean[] nullSafes = desc.getNullSafes();
     if (nullSafes == null) {
-      return false;
+      return false;
     }
     for (boolean nullSafe : nullSafes) {
       if (nullSafe) {
@@ -2435,372 +2196,53 @@ public class Vectorizer implements PhysicalPlanResolver {
   }
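
For illustration only (not part of the patch): a condensed form of the decision implemented by canSpecializeMapJoin below, on the "+" side of this hunk. The parameter names are illustrative; each corresponds to a check visible in the patched code (native map join enabled, Tez, a single non-null-safe join condition, then either the optimized container with supported key types, or the fast hash table without Hybrid Grace Hash Join).

    class CanSpecializeSketch {
        static boolean canSpecialize(boolean nativeEnabled, boolean isTez,
                boolean oneJoinCondition, boolean hasNullSafes,
                boolean fastHashTableEnabled, boolean useOptimizedTable,
                boolean keyTypesSupported, boolean isHybridHashJoin) {
            if (!nativeEnabled || !isTez || !oneJoinCondition || hasNullSafes) {
                return false;
            }
            if (!fastHashTableEnabled) {
                // Optimized hash table: the optimized container must be enabled
                // and every big-table key type must be supported.
                return useOptimizedTable && keyTypesSupported;
            }
            // Fast hash table: Hybrid Grace Hash Join is not supported.
            return !isHybridHashJoin;
        }
    }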
 
   private boolean canSpecializeMapJoin(Operator<? extends OperatorDesc> op, MapJoinDesc desc,
-      boolean isTezOrSpark, VectorizationContext vContext, VectorMapJoinInfo vectorMapJoinInfo)
-          throws HiveException {
-
-    Preconditions.checkState(op instanceof MapJoinOperator);
-
-    // Allocate a VectorMapJoinDesc initially with implementation type NONE so EXPLAIN
-    // can report this operator was vectorized, but not native, along with the conditions.
-    VectorMapJoinDesc vectorDesc = new VectorMapJoinDesc();
-    desc.setVectorDesc(vectorDesc);
-
-    boolean isVectorizationMapJoinNativeEnabled = HiveConf.getBoolVar(hiveConf,
-        HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED);
-
-    String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
-
-    boolean oneMapJoinCondition = (desc.getConds().length == 1);
-
-    boolean hasNullSafes = onExpressionHasNullSafes(desc);
-
-    byte posBigTable = (byte) desc.getPosBigTable();
-
-    // Since we want to display all the met and not met conditions in EXPLAIN, we determine all
-    // information first...
-
-    List<ExprNodeDesc> keyDesc = desc.getKeys().get(posBigTable);
-    VectorExpression[] allBigTableKeyExpressions = vContext.getVectorExpressions(keyDesc);
-    final int allBigTableKeyExpressionsLength = allBigTableKeyExpressions.length;
-    boolean isEmptyKey = (allBigTableKeyExpressionsLength == 0);
+      boolean isTez) {
 
-    boolean supportsKeyTypes = true;  // Assume.
-    HashSet<String> notSupportedKeyTypes = new HashSet<String>();
+    boolean specialize = false;
 
-    // Since a key expression can be a calculation and the key will go into a scratch column,
-    // we need the mapping and type information.
-    int[] bigTableKeyColumnMap = new int[allBigTableKeyExpressionsLength];
-    String[] bigTableKeyColumnNames = new String[allBigTableKeyExpressionsLength];
-    TypeInfo[] bigTableKeyTypeInfos = new TypeInfo[allBigTableKeyExpressionsLength];
-    ArrayList<VectorExpression> bigTableKeyExpressionsList = new ArrayList<VectorExpression>();
-    VectorExpression[] bigTableKeyExpressions;
-    for (int i = 0; i < allBigTableKeyExpressionsLength; i++) {
-      VectorExpression ve = allBigTableKeyExpressions[i];
-      if (!IdentityExpression.isColumnOnly(ve)) {
-        bigTableKeyExpressionsList.add(ve);
-      }
-      bigTableKeyColumnMap[i] = ve.getOutputColumn();
-
-      ExprNodeDesc exprNode = keyDesc.get(i);
-      bigTableKeyColumnNames[i] = exprNode.toString();
-
-      TypeInfo typeInfo = exprNode.getTypeInfo();
-      // Verify we handle the key column types for an optimized table.  This is effectively the
-      // same check used in HashTableLoader.
-      if (!MapJoinKey.isSupportedField(typeInfo)) {
-        supportsKeyTypes = false;
-        Category category = typeInfo.getCategory();
-        notSupportedKeyTypes.add(
-            (category != Category.PRIMITIVE ? category.toString() :
-              ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString()));
-      }
-      bigTableKeyTypeInfos[i] = typeInfo;
-    }
-    if (bigTableKeyExpressionsList.size() == 0) {
-      bigTableKeyExpressions = null;
-    } else {
-      bigTableKeyExpressions = bigTableKeyExpressionsList.toArray(new VectorExpression[0]);
-    }
-
-    List<ExprNodeDesc> bigTableExprs = desc.getExprs().get(posBigTable);
-    VectorExpression[] allBigTableValueExpressions = vContext.getVectorExpressions(bigTableExprs);
-
-    boolean isFastHashTableEnabled =
+    if (op instanceof MapJoinOperator &&
         HiveConf.getBoolVar(hiveConf,
-            HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED);
-    vectorDesc.setIsFastHashTableEnabled(isFastHashTableEnabled);
+            HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED)) {
 
-    // Especially since LLAP is prone to turn it off in the MapJoinDesc in later
-    // physical optimizer stages...
-    boolean isHybridHashJoin = desc.isHybridHashJoin();
-    vectorDesc.setIsHybridHashJoin(isHybridHashJoin);
+      // Currently, only under Tez and non-N-way joins.
+      if (isTez && desc.getConds().length == 1 && !onExpressionHasNullSafes(desc)) {
 
-    /*
-     * Populate vectorMapJoininfo.
-     */
+        // Ok, all basic restrictions satisfied so far...
+        specialize = true;
 
-    /*
-     * Similarly, we need a mapping since a value expression can be a calculation and the value
-     * will go into a scratch column.
-     */
-    int[] bigTableValueColumnMap = new int[allBigTableValueExpressions.length];
-    String[] bigTableValueColumnNames = new String[allBigTableValueExpressions.length];
-    TypeInfo[] bigTableValueTypeInfos = new TypeInfo[allBigTableValueExpressions.length];
-    ArrayList<VectorExpression> bigTableValueExpressionsList = new ArrayList<VectorExpression>();
-    VectorExpression[] bigTableValueExpressions;
-    for (int i = 0; i < bigTableValueColumnMap.length; i++) {
-      VectorExpression ve = allBigTableValueExpressions[i];
-      if (!IdentityExpression.isColumnOnly(ve)) {
-        bigTableValueExpressionsList.add(ve);
-      }
-      bigTableValueColumnMap[i] = ve.getOutputColumn();
+        if (!HiveConf.getBoolVar(hiveConf,
+            HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) {
 
-      ExprNodeDesc exprNode = bigTableExprs.get(i);
-      bigTableValueColumnNames[i] = exprNode.toString();
-      bigTableValueTypeInfos[i] = exprNode.getTypeInfo();
-    }
-    if (bigTableValueExpressionsList.size() == 0) {
-      bigTableValueExpressions = null;
-    } else {
-      bigTableValueExpressions = bigTableValueExpressionsList.toArray(new VectorExpression[0]);
-    }
-
-    vectorMapJoinInfo.setBigTableKeyColumnMap(bigTableKeyColumnMap);
-    vectorMapJoinInfo.setBigTableKeyColumnNames(bigTableKeyColumnNames);
-    vectorMapJoinInfo.setBigTableKeyTypeInfos(bigTableKeyTypeInfos);
-    vectorMapJoinInfo.setBigTableKeyExpressions(bigTableKeyExpressions);
-
-    vectorMapJoinInfo.setBigTableValueColumnMap(bigTableValueColumnMap);
-    vectorMapJoinInfo.setBigTableValueColumnNames(bigTableValueColumnNames);
-    vectorMapJoinInfo.setBigTableValueTypeInfos(bigTableValueTypeInfos);
-    vectorMapJoinInfo.setBigTableValueExpressions(bigTableValueExpressions);
+          // Since we are using the optimized hash table, we have further
+          // restrictions (the optimized table container must be enabled and the key types supported).
 
-    /*
-     * Small table information.
-     */
-    VectorColumnOutputMapping bigTableRetainedMapping =
-        new VectorColumnOutputMapping("Big Table Retained Mapping");
-
-    VectorColumnOutputMapping bigTableOuterKeyMapping =
-        new VectorColumnOutputMapping("Big Table Outer Key Mapping");
-
-    // The order of the fields in the LazyBinary small table value must be used, so
-    // we use the source ordering flavor for the mapping.
-    VectorColumnSourceMapping smallTableMapping =
-        new VectorColumnSourceMapping("Small Table Mapping");
-
-    Byte[] order = desc.getTagOrder();
-    Byte posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? order[1] : order[0]);
-    boolean isOuterJoin = !desc.getNoOuterJoin();
-
-    /*
-     * Gather up big and small table output result information from the MapJoinDesc.
-     */
-    List<Integer> bigTableRetainList = desc.getRetainList().get(posBigTable);
-    int bigTableRetainSize = bigTableRetainList.size();
-
-    int[] smallTableIndices;
-    int smallTableIndicesSize;
-    List<ExprNodeDesc> smallTableExprs = desc.getExprs().get(posSingleVectorMapJoinSmallTable);
-    if (desc.getValueIndices() != null && desc.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) {
-      smallTableIndices = desc.getValueIndices().get(posSingleVectorMapJoinSmallTable);
-      smallTableIndicesSize = smallTableIndices.length;
-    } else {
-      smallTableIndices = null;
-      smallTableIndicesSize = 0;
-    }
-
-    List<Integer> smallTableRetainList = desc.getRetainList().get(posSingleVectorMapJoinSmallTable);
-    int smallTableRetainSize = smallTableRetainList.size();
-
-    int smallTableResultSize = 0;
-    if (smallTableIndicesSize > 0) {
-      smallTableResultSize = smallTableIndicesSize;
-    } else if (smallTableRetainSize > 0) {
-      smallTableResultSize = smallTableRetainSize;
-    }
-
-    /*
-     * Determine the big table retained mapping first so we can optimize out (with
-     * projection) copying inner join big table keys in the subsequent small table results section.
-     */
-
-    // We use a mapping object here so we can build the projection in any order and
-    // get the ordered by 0 to n-1 output columns at the end.
-    //
-    // Also, to avoid copying a big table key into the small table result area for inner joins,
-    // we reference it with the projection so there can be duplicate output columns
-    // in the projection.
-    VectorColumnSourceMapping projectionMapping = new VectorColumnSourceMapping("Projection Mapping");
-
-    int nextOutputColumn = (order[0] == posBigTable ? 0 : smallTableResultSize);
-    for (int i = 0; i < bigTableRetainSize; i++) {
-
-      // Since bigTableValueExpressions may do a calculation and produce a scratch column, we
-      // need to map to the right batch column.
-
-      int retainColumn = bigTableRetainList.get(i);
-      int batchColumnIndex = bigTableValueColumnMap[retainColumn];
-      TypeInfo typeInfo = bigTableValueTypeInfos[i];
-
-      // With this map we project the big table batch to make it look like an output batch.
-      projectionMapping.add(nextOutputColumn, batchColumnIndex, typeInfo);
-
-      // Collect columns we copy from the big table batch to the overflow batch.
-      if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
-        // Tolerate repeated use of a big table column.
-        bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeInfo);
-      }
-
-      nextOutputColumn++;
-    }
-
-    /*
-     * Now determine the small table results.
-     */
-    boolean smallTableExprVectorizes = true;
-
-    int firstSmallTableOutputColumn;
-    firstSmallTableOutputColumn = (order[0] == posBigTable ? bigTableRetainSize : 0);
-    int smallTableOutputCount = 0;
-    nextOutputColumn = firstSmallTableOutputColumn;
-
-    // Small table indices has more information (i.e. keys) than retain, so use it if it exists...
-    String[] bigTableRetainedNames;
-    if (smallTableIndicesSize > 0) {
-      smallTableOutputCount = smallTableIndicesSize;
-      bigTableRetainedNames = new String[smallTableOutputCount];
-
-      for (int i = 0; i < smallTableIndicesSize; i++) {
-        if (smallTableIndices[i] >= 0) {
-
-          // Zero and above numbers indicate a big table key is needed for
-          // small table result "area".
-
-          int keyIndex = smallTableIndices[i];
-
-          // Since bigTableKeyExpressions may do a calculation and produce a scratch column, we
-          // need to map the right column.
-          int batchKeyColumn = bigTableKeyColumnMap[keyIndex];
-          bigTableRetainedNames[i] = bigTableKeyColumnNames[keyIndex];
-          TypeInfo typeInfo = bigTableKeyTypeInfos[keyIndex];
-
-          if (!isOuterJoin) {
-
-            // Optimize inner join keys of small table results.
-
-            // Project the big table key into the small table result "area".
-            projectionMapping.add(nextOutputColumn, batchKeyColumn, typeInfo);
-
-            if (!bigTableRetainedMapping.containsOutputColumn(batchKeyColumn)) {
-              // If necessary, copy the big table key into the overflow batch's small table
-              // result "area".
-              bigTableRetainedMapping.add(batchKeyColumn, batchKeyColumn, typeInfo);
-            }
+          if (!HiveConf.getBoolVar(hiveConf,
+              HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE)) {
+            specialize = false;
           } else {
-
-            // For outer joins, since the small table key can be null when there is no match,
-            // we must have a physical (scratch) column for those keys.  We cannot use the
-            // projection optimization used by inner joins above.
-
-            int scratchColumn = vContext.allocateScratchColumn(typeInfo.getTypeName());
-            projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
-
-            bigTableRetainedMapping.add(batchKeyColumn, scratchColumn, typeInfo);
-
-            bigTableOuterKeyMapping.add(batchKeyColumn, scratchColumn, typeInfo);
+            byte posBigTable = (byte) desc.getPosBigTable();
+            Map<Byte, List<ExprNodeDesc>> keyExprs = desc.getKeys();
+            List<ExprNodeDesc> bigTableKeyExprs = keyExprs.get(posBigTable);
+            for (ExprNodeDesc exprNodeDesc : bigTableKeyExprs) {
+              String typeName = exprNodeDesc.getTypeString();
+              if (!MapJoinKey.isSupportedField(typeName)) {
+                specialize = false;
+                break;
+              }
+            }
           }
         } else {
 
-          // Negative numbers indicate a column to be (deserialize) read from the small table's
-          // LazyBinary value row.
-          int smallTableValueIndex = -smallTableIndices[i] - 1;
+          // With the fast hash table implementation, we currently do not support
+          // Hybrid Grace Hash Join.
 
-          ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
-          if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
-            clearNotVectorizedReason();
-            smallTableExprVectorizes = false;
+          if (desc.isHybridHashJoin()) {
+            specialize = false;
           }
-
-          bigTableRetainedNames[i] = smallTableExprNode.toString();
-
-          TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
-
-          // Make a new big table scratch column for the small table value.
-          int scratchColumn = vContext.allocateScratchColumn(typeInfo.getTypeName());
-          projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
-
-          smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
-        }
-        nextOutputColumn++;
-      }
-    } else if (smallTableRetainSize > 0) {
-      smallTableOutputCount = smallTableRetainSize;
-      bigTableRetainedNames = new String[smallTableOutputCount];
-
-      // Only small table values appear in join output result.
-
-      for (int i = 0; i < smallTableRetainSize; i++) {
-        int smallTableValueIndex = smallTableRetainList.get(i);
-
-        ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
-        if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
-          clearNotVectorizedReason();
-          smallTableExprVectorizes = false;
         }
-
-        bigTableRetainedNames[i] = smallTableExprNode.toString();
-
-        // Make a new big table scratch column for the small table value.
-        TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
-        int scratchColumn = vContext.allocateScratchColumn(typeInfo.getTypeName());
-
-        projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
-
-        smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
-        nextOutputColumn++;
       }
-    } else {
-      bigTableRetainedNames = new String[0];
-    }
-
-    // Remember the condition variables for EXPLAIN regardless.
-    vectorDesc.setIsVectorizationMapJoinNativeEnabled(isVectorizationMapJoinNativeEnabled);
-    vectorDesc.setEngine(engine);
-    vectorDesc.setOneMapJoinCondition(oneMapJoinCondition);
-    vectorDesc.setHasNullSafes(hasNullSafes);
-    vectorDesc.setSupportsKeyTypes(supportsKeyTypes);
-    if (!supportsKeyTypes) {
-      vectorDesc.setNotSupportedKeyTypes(new ArrayList(notSupportedKeyTypes));
-    }
-    vectorDesc.setIsEmptyKey(isEmptyKey);
-    vectorDesc.setSmallTableExprVectorizes(smallTableExprVectorizes);
-
-    // Currently, only under Tez and non-N-way joins.
-

<TRUNCATED>

[02/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index eb61044..bd9b852 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -66,21 +66,105 @@ POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
@@ -100,21 +184,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -134,21 +308,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6058
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -168,21 +432,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6248
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col0 < 100)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 100)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -202,7 +556,7 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -211,7 +565,7 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -220,7 +574,117 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: tinyint)
+                          1 _col0 (type: tinyint)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 7329 Data size: 2451 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
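In the three-way plan just above, the first join's key pair is `UDFToLong(_col1)` against `(_col0 pmod 8)`: the corresponding ON clause falls outside this hunk, but the keys imply a condition of the shape `s.cmodint = pmod(sm.cbigint, 8)`, with the int side widened to bigint so both sides hash the same type. An illustrative rewrite (a sketch only; the table and column names are the ones the plan references) that makes the computed key explicit:

    SELECT count(*)
    FROM sorted_mod_4 s
    LEFT OUTER JOIN
      (SELECT pmod(cbigint, 8) AS k FROM small_table) sm
      ON CAST(s.cmodint AS bigint) = sm.k
    LEFT OUTER JOIN sorted_mod_4 s2
      ON s2.ctinyint = s.ctinyint;
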
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -306,21 +770,105 @@ POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
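Each of these Spark plans has the same two-stage shape: Stage-2 scans the small table and ends in a Spark HashTable Sink Operator, which builds the hash table, and Stage-1's Map Join Operator then probes it while streaming the large side (note `input vertices: 1 Map 3`). The conversion is governed by the standard auto-map-join settings; a sketch, with an illustrative size threshold rather than whatever the test harness actually uses:

    SET hive.auto.convert.join=true;                             -- permit map-join conversion
    SET hive.auto.convert.join.noconditionaltask.size=10000000;  -- small-side byte budget
    SELECT count(*)
    FROM mod_8_mod_4 s
    LEFT OUTER JOIN small_table2 st
      ON s.cmodtinyint = st.cmodtinyint;
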
 PREHOOK: query: select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
@@ -340,21 +888,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 39112
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
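Because `s.cmodint = 2` sits in the ON clause, it only gates matching: an s-row with cmodint <> 2 contributes exactly one null-extended row, while an s-row with cmodint = 2 contributes one row per match, or one null-extended row if there is none. A check query capturing that decomposition, assuming cmodint and cmodtinyint are never NULL in this test data:

    SELECT SUM(CASE WHEN s.cmodint <> 2
                    THEN 1                    -- predicate failed: one null-extended row
                    ELSE COALESCE(m.cnt, 1)   -- matches, or null-extended if none
               END)
    FROM mod_8_mod_4 s
    LEFT OUTER JOIN
      (SELECT cmodtinyint, count(*) AS cnt
       FROM small_table2
       GROUP BY cmodtinyint) m
      ON s.cmodtinyint = m.cmodtinyint;
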
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -374,21 +1012,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 11171
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {((_col0 pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((_col0 pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
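This variant keys the residual predicate on `pmod`, Hive's positive-modulus function. Unlike `%`, pmod returns a non-negative result for a positive divisor, which matters whenever the argument can be negative:

    -- Quick illustration of the difference:
    SELECT (-3) % 4    AS m,   -- -3
           pmod(-3, 4) AS pm;  -- 1
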
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -408,21 +1136,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 14371
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col0 < 3)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 3)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -442,7 +1260,7 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 17792
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -451,7 +1269,7 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -460,7 +1278,117 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 7329 Data size: 3335 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
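The plan above chains two Map Join Operators inside Map 1's operator tree: the output of the first probe (against Map 3's hash table) feeds straight into the second (against Map 4's) without leaving the task, both build sides having been prepared in Stage-2. Size-based (non-conditional) map-join conversion is what allows consecutive map joins to be combined this way; a sketch, with the first ON clause reconstructed from the plan's join keys since the hunk elides it:

    SET hive.auto.convert.join.noconditionaltask=true;  -- size-based map-join conversion
    SELECT count(*) FROM (
      SELECT s.*, sm.*, s2.*
      FROM mod_8_mod_4 s
      LEFT OUTER JOIN small_table2 sm
        ON s.cmodint = pmod(sm.cbigint, 8)   -- inferred from UDFToLong(_col1) / (_col0 pmod 8)
      LEFT OUTER JOIN mod_8_mod_4 s2
        ON s2.cmodtinyint = s.cmodtinyint
    ) t1;
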
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
index 5bc0f6e..5497426 100644
--- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
@@ -97,77 +97,32 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+PREHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
        CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+POSTHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
        CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
-                  Select Operator
-                    expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [7, 12, 11]
-                        selectExpressions: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 12:String_Family, StringGroupColConcatStringScalar(col 13, val |)(children: StringScalarConcatStringGroupCol(val |, col 11)(children: StringRTrim(col 13)(children: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 13:String_Family) -> 11:String) -> 13:String_Family) -> 11:String_Family
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Limit
-                      Number of rows: 20
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 20
       Processor Tree:
-        ListSink
+        TableScan
+          alias: over1korc
+          Select Operator
+            expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Limit
+              Number of rows: 20
+              ListSink
 
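Worth noting in the new output above: the plan collapses to a single Fetch stage with the TableScan inside the Processor Tree, meaning no Spark job runs at all. That is fetch-task conversion for simple SELECT/LIMIT queries; the config below is the standard one, though the exact value the test harness runs with is an assumption here:

    SET hive.fetch.task.conversion=more;  -- simple selects run as a client-side fetch
    SELECT s,
           CONCAT(CONCAT('      ', s), '      ') AS none_padded_str
    FROM over1korc
    LIMIT 20;
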
 PREHOOK: query: SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
@@ -310,24 +265,20 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -345,25 +296,11 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [19]
-                        selectExpressions: StringGroupConcatColCol(col 17, col 18)(children: StringGroupColConcatStringScalar(col 18, val -)(children: StringScalarConcatStringGroupCol(val Quarter , col 17)(children: CastLongToString(col 13)(children: CastDoubleToLong(col 15)(children: DoubleColAddDoubleScalar(col 16, val 1.0)(children: DoubleColDivideDoubleScalar(col 15, val 3.0)(children: CastLongToDouble(col 14)(children: LongColSubtractLongScalar(col 13, val 1)(children: VectorUDFMonthDate(col 12, field MONTH) -> 13:long) -> 14:long) -> 15:double) -> 16:double) -> 15:double) -> 13:long) -> 17:String) -> 18:String_Family) -> 17:String_Family, CastLongToString(col 13)(children: VectorUDFYearDate(col 12, field YEAR) -> 13:long) -> 18:String) -> 19:String_Family
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 19
-                          native: false
-                          projectedOutputColumns: []
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0
@@ -372,39 +309,13 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
@@ -412,42 +323,20 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 50
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 22950 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

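In the lines that survive this hunk, `TopN Hash Memory Usage: 0.1` marks limit pushdown for the ORDER BY ... LIMIT 50: each upstream task keeps only the current top keys in a bounded hash before shuffling, instead of emitting every row. The fraction of task memory that hash may use is controlled by the standard pushdown knob:

    SET hive.limit.pushdown.memory.usage=0.1;  -- fraction of task memory for the top-N hash
    SELECT CONCAT(CONCAT(CONCAT('Quarter ',
             CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)), '-'),
             CAST(YEAR(dt) AS STRING)) AS `field`
    FROM vectortab2korc
    GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',
             CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)), '-'),
             CAST(YEAR(dt) AS STRING))
    ORDER BY `field`
    LIMIT 50;
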
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
index 1c8e479..9a46ee1 100644
--- a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
@@ -121,16 +121,12 @@ POSTHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi v
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_lazy_binary_columnar
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -146,23 +142,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                     expressions: CAST( t AS varchar(10)) (type: varchar(10)), CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), CAST( s AS varchar(50)) (type: varchar(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19]
-                        selectExpressions: CastLongToVarChar(col 0, maxLength 10) -> 13:VarChar, CastLongToVarChar(col 1, maxLength 10) -> 14:VarChar, CastLongToVarChar(col 2, maxLength 20) -> 15:VarChar, CastLongToVarChar(col 3, maxLength 30) -> 16:VarChar, VectorUDFAdaptor(CAST( f AS varchar(20))) -> 17:varchar(20), VectorUDFAdaptor(CAST( d AS varchar(20))) -> 18:varchar(20), CastStringGroupToVarChar(col 8, maxLength 50) -> 19:VarChar
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -170,14 +155,6 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
                           name: default.varchar_lazy_binary_columnar
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Move Operator


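The Select Operator in the varchar diff above casts every column to a bounded varchar. One behavior worth keeping in mind when reading those expressions: CAST to varchar(n) silently truncates to the declared maximum length, e.g.:

    SELECT CAST('abcdefghijkl' AS varchar(10)) AS v10;  -- 'abcdefghij'
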
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
index d6c405e..a14d515 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
@@ -30,8 +30,7 @@ PREHOOK: query: -- SORT_QUERY_RESULTS
 -- ArithmeticOps: Add, Multiply, Subtract, Divide
 -- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(cint),
+EXPLAIN SELECT AVG(cint),
        (AVG(cint) + -3728),
        (-((AVG(cint) + -3728))),
        (-((-((AVG(cint) + -3728))))),
@@ -99,8 +98,7 @@ POSTHOOK: query: -- SORT_QUERY_RESULTS
 -- ArithmeticOps: Add, Multiply, Subtract, Divide
 -- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(cint),
+EXPLAIN SELECT AVG(cint),
        (AVG(cint) + -3728),
        (-((AVG(cint) + -3728))),
        (-((-((AVG(cint) + -3728))))),
@@ -136,10 +134,6 @@ WHERE  ((762 = cbigint)
                     AND ((79.553 != cint)
                          AND (cboolean2 != cboolean1)))))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -157,33 +151,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val 762, col 3) -> boolean, FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 12, col 4)(children: CastLongToFloatViaLongToDouble(col 1) -> 12:double) -> boolean, FilterDoubleColGreaterDoubleScalar(col 12, val -5.0)(children: CastTimestampToDouble(col 9) -> 12:double) -> boolean, FilterDoubleColNotEqualDoubleColumn(col 5, col 12)(children: CastLongToDouble(col 2) -> 12:double) -> boolean) -> boolean, FilterStringGroupColEqualStringScalar(col 6, val a) -> boolean, FilterExprAndExpr(children: FilterDecimalColLessEqualDecimalScalar(col 13, val -1.389)(children: CastLongToDecimal(col 3) -> 13:decimal(22,3)) -> boolean, FilterStringGroupColNotEqualStringScalar(col 7, val a) -> boolean, FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14)(children: CastLongToDecimal(col 2) -> 14:decimal(13,3)) -> boolean, FilterLongColNotEqualLongColumn(col 11, col 10) -> boolean) -> boolean) -> boolean
                     predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1))) (type: boolean)
                     Statistics: Num rows: 5466 Data size: 1157380 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
                       outputColumnNames: cint, cdouble, csmallint, cfloat, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 5, 1, 4, 0]
                       Statistics: Num rows: 5466 Data size: 1157380 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: avg(cint), sum(cdouble), stddev_pop(cint), stddev_samp(csmallint), var_samp(cint), avg(cfloat), stddev_samp(cint), min(ctinyint), count(csmallint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFAvgLong(col 2) -> struct<count:bigint,sum:double>, VectorUDAFSumDouble(col 5) -> double, VectorUDAFStdPopLong(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarSampLong(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFAvgDouble(col 4) -> struct<count:bigint,sum:double>, VectorUDAFStdSampLong(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMinLong(col 0) -> tinyint, VectorUDAFCount(col 1) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(col 2) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarSampLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgDouble(col 4) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 492 Basic stats: COMPLETE Column stats: COMPLETE
@@ -193,21 +169,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: struct<count:bigint,sum:double,input:int>), _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,input:float>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: tinyint), _col8 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:int> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_samp(VALUE._col3), var_samp(VALUE._col4), avg(VALUE._col5), stddev_samp(VALUE._col6), min(VALUE._col7), count(VALUE._col8)
@@ -314,8 +277,7 @@ PREHOOK: query: -- TargetTypeClasses: Long, Bool, Double, String, Timestamp
 -- ArithmeticOps: Divide, Multiply, Remainder, Subtract
 -- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT MAX(cint),
+EXPLAIN SELECT MAX(cint),
        (MAX(cint) / -3728),
        (MAX(cint) * -3728),
        VAR_POP(cbigint),
@@ -353,8 +315,7 @@ POSTHOOK: query: -- TargetTypeClasses: Long, Bool, Double, String, Timestamp
 -- ArithmeticOps: Divide, Multiply, Remainder, Subtract
 -- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT MAX(cint),
+EXPLAIN SELECT MAX(cint),
        (MAX(cint) / -3728),
        (MAX(cint) * -3728),
        VAR_POP(cbigint),
@@ -387,10 +348,6 @@ WHERE  (((cbigint <= 197)
            OR ((cfloat > 79.553)
                AND (cstring2 LIKE '10%')))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -408,33 +365,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2036734 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 3, val 197) -> boolean, FilterLongColLessLongColumn(col 2, col 3)(children: col 2) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5, val -26.28) -> boolean, FilterDoubleColGreaterDoubleColumn(col 12, col 5)(children: CastLongToDouble(col 1) -> 12:double) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 12, col 4)(children: CastLongToFloatViaLongToDouble(col 0) -> 12:double) -> boolean, FilterStringColRegExpStringScalar(col 6, pattern .*ss.*) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 4, val 79.5530014038086) -> boolean, FilterStringColLikeStringScalar(col 7, pattern 10%) -> boolean) -> boolean) -> boolean
                     predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                     Statistics: Num rows: 6826 Data size: 1131534 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
                       outputColumnNames: cint, cbigint, csmallint, cdouble, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 3, 1, 5, 0]
                       Statistics: Num rows: 6826 Data size: 1131534 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: max(cint), var_pop(cbigint), stddev_pop(csmallint), max(cdouble), avg(ctinyint), min(cint), min(cdouble), stddev_samp(csmallint), var_samp(cint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFMaxLong(col 2) -> int, VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopLong(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMaxDouble(col 5) -> double, VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double>, VectorUDAFMinLong(col 2) -> int, VectorUDAFMinDouble(col 5) -> double, VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarSampLong(col 2) -> struct<count:bigint,sum:double,variance:double>
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarSampLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 420 Basic stats: COMPLETE Column stats: COMPLETE
@@ -444,21 +383,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,input:tinyint>), _col5 (type: int), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF var_pop parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col1] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), var_pop(VALUE._col1), stddev_pop(VALUE._col2), max(VALUE._col3), avg(VALUE._col4), min(VALUE._col5), min(VALUE._col6), stddev_samp(VALUE._col7), var_samp(VALUE._col8)
@@ -559,8 +485,7 @@ PREHOOK: query: -- TargetTypeClasses: String, Long, Bool, Double, Timestamp
 -- ArithmeticOps: Subtract, Remainder, Multiply, Add
 -- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT VAR_POP(cbigint),
+EXPLAIN SELECT VAR_POP(cbigint),
        (-(VAR_POP(cbigint))),
        (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))),
        COUNT(*),
@@ -597,8 +522,7 @@ POSTHOOK: query: -- TargetTypeClasses: String, Long, Bool, Double, Timestamp
 -- ArithmeticOps: Subtract, Remainder, Multiply, Add
 -- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT VAR_POP(cbigint),
+EXPLAIN SELECT VAR_POP(cbigint),
        (-(VAR_POP(cbigint))),
        (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))),
        COUNT(*),
@@ -630,10 +554,6 @@ WHERE  ((ctimestamp1 = ctimestamp2)
                   AND ((ctimestamp2 IS NOT NULL)
                        AND (cstring2 > 'a'))))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -651,33 +571,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterTimestampColEqualTimestampColumn(col 8, col 9) -> boolean, FilterDoubleScalarEqualDoubleColumn(val 762.0, col 4) -> boolean, FilterStringGroupColEqualStringScalar(col 6, val ss) -> boolean, FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 1, col 3)(children: col 1) -> boolean, FilterLongScalarEqualLongColumn(val 1, col 11) -> boolean) -> boolean, FilterExprAndExpr(children: SelectColumnIsNotNull(col 10) -> boolean, SelectColumnIsNotNull(col 9) -> boolean, FilterStringGroupColGreaterStringScalar(col 7, val a) -> boolean) -> boolean) -> boolean
                     predicate: ((ctimestamp1 = ctimestamp2) or (762 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
                       outputColumnNames: cbigint, ctinyint, csmallint, cint, cdouble
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [3, 0, 1, 2, 5]
                       Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: var_pop(cbigint), count(), max(ctinyint), stddev_pop(csmallint), max(cint), stddev_samp(cdouble), count(ctinyint), avg(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint, VectorUDAFMaxLong(col 0) -> tinyint, VectorUDAFStdPopLong(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMaxLong(col 2) -> int, VectorUDAFStdSampDouble(col 5) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCount(col 0) -> bigint, VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double>
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopLong(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampDouble(col 5) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                         Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
@@ -687,21 +589,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: struct<count:bigint,sum:double,variance:double>), _col1 (type: bigint), _col2 (type: tinyint), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: int), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: bigint), _col7 (type: struct<count:bigint,sum:double,input:tinyint>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF var_pop parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: var_pop(VALUE._col0), count(VALUE._col1), max(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), stddev_samp(VALUE._col5), count(VALUE._col6), avg(VALUE._col7)
@@ -801,8 +690,7 @@ PREHOOK: query: -- TargetTypeClasses: String, Bool, Timestamp, Long, Double
 -- ArithmeticOps: Add, Divide, Remainder, Multiply
 -- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(ctinyint),
+EXPLAIN SELECT AVG(ctinyint),
        (AVG(ctinyint) + 6981),
        ((AVG(ctinyint) + 6981) + AVG(ctinyint)),
        MAX(cbigint),
@@ -829,8 +717,7 @@ POSTHOOK: query: -- TargetTypeClasses: String, Bool, Timestamp, Long, Double
 -- ArithmeticOps: Add, Divide, Remainder, Multiply
 -- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal
 -- GroupBy: NoGroupByProjectAggs
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT AVG(ctinyint),
+EXPLAIN SELECT AVG(ctinyint),
        (AVG(ctinyint) + 6981),
        ((AVG(ctinyint) + 6981) + AVG(ctinyint)),
        MAX(cbigint),
@@ -852,10 +739,6 @@ WHERE  (((ctimestamp2 <= ctimestamp1)
             AND (ctimestamp1 >= 0))
            OR (cfloat = 17))
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -873,33 +756,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2139070 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessEqualTimestampColumn(col 9, col 8) -> boolean, FilterDoubleColNotEqualDoubleColumn(col 12, col 5)(children: CastLongToDouble(col 3) -> 12:double) -> boolean, FilterStringScalarLessEqualStringGroupColumn(val ss, col 6) -> boolean) -> boolean, FilterExprAndExpr(children: FilterLongColLessLongColumn(col 1, col 0)(children: col 0) -> boolean, FilterDoubleColGreaterEqualDoubleScalar(col 12, val 0.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean) -> boolean, FilterDoubleColEqualDoubleScalar(col 4, val 17.0) -> boolean) -> boolean
                     predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17)) (type: boolean)
                     Statistics: Num rows: 2835 Data size: 493648 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
                       outputColumnNames: ctinyint, cbigint, cint, cfloat
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [0, 3, 2, 4]
                       Statistics: Num rows: 2835 Data size: 493648 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: avg(ctinyint), max(cbigint), stddev_samp(cint), var_pop(cint), var_pop(cbigint), max(cfloat)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double>, VectorUDAFMaxLong(col 3) -> bigint, VectorUDAFStdSampLong(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarPopLong(col 2) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFMaxDouble(col 4) -> float
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgLong(col 0) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopLong(col 2) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopLong(col 3) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: COMPLETE
@@ -909,21 +774,8 @@ STAGE PLANS:
                           value expressions: _col0 (type: struct<count:bigint,sum:double,input:tinyint>), _col1 (type: bigint), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: float)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:tinyint> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), max(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_pop(VALUE._col4), max(VALUE._col5)
@@ -1003,8 +855,7 @@ PREHOOK: query: -- TargetTypeClasses: Timestamp, String, Long, Double, Bool
 -- ArithmeticOps: Multiply, Subtract, Add, Divide
 -- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cdouble,
        ctimestamp2,
        cstring1,
@@ -1045,8 +896,7 @@ POSTHOOK: query: -- TargetTypeClasses: Timestamp, String, Long, Double, Bool
 -- ArithmeticOps: Multiply, Subtract, Add, Divide
 -- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cdouble,
        ctimestamp2,
        cstring1,
@@ -1082,10 +932,6 @@ WHERE  (((cstring1 RLIKE 'a.*')
 ORDER BY cint, cdouble, ctimestamp2, cstring1, cboolean2, ctinyint, cfloat, ctimestamp1, csmallint, cbigint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13
 LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1103,74 +949,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3056470 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterStringColRegExpStringScalar(col 6, pattern a.*) -> boolean, FilterStringColLikeStringScalar(col 7, pattern %ss%) -> boolean) -> boolean, FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val 1, col 11) -> boolean, FilterDecimalColLessDecimalScalar(col 12, val 79.553)(children: CastLongToDecimal(col 1) -> 12:decimal(8,3)) -> boolean, FilterLongScalarNotEqualLongColumn(val -257, col 0)(children: col 0) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 5, col 13)(children: CastLongToDouble(col 0) -> 13:double) -> boolean, FilterDoubleColGreaterEqualDoubleColumn(col 4, col 13)(children: CastLongToFloatViaLongToDouble(col 2) -> 13:double) -> boolean) -> boolean, FilterExprAndExpr(children: FilterLongColLessLongColumn(col 2, col 3)(children: col 2) -> boolean, FilterLongColGreaterLongColumn(col 0, col 3)(children: col 0) -> boolean) -> boolean) -> boolean
                     predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (CAST( csmallint AS decimal(8,3)) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                     Statistics: Num rows: 9898 Data size: 2462086 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - CAST( cint AS decimal(10,0))) (type: decimal(14,3)), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - CAST( cint AS decimal(10,0))) - -26.28) (type: decimal(15,3)), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / CAST( ctinyint AS decimal(3,0))) (type: decimal(20,18))
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 5, 9, 6, 11, 0, 4, 8, 1, 3, 14, 15, 17, 18, 20, 22, 24, 26, 13, 23, 28, 19, 30]
-                          selectExpressions: LongScalarMultiplyLongColumn(val -3728, col 3) -> 14:long, LongColUnaryMinus(col 2) -> 15:long, DecimalScalarSubtractDecimalColumn(val -863.257, col 16)(children: CastLongToDecimal(col 2) -> 16:decimal(10,0)) -> 17:decimal(14,3), LongColUnaryMinus(col 1) -> 18:long, LongColSubtractLongColumn(col 1, col 19)(children: LongColUnaryMinus(col 1) -> 19:long) -> 20:long, LongColAddLongColumn(col 21, col 19)(children: LongColSubtractLongColumn(col 1, col 19)(children: LongColUnaryMinus(col 1) -> 19:long) -> 21:long, LongColUnaryMinus(col 1) -> 19:long) -> 22:long, DoubleColDivideDoubleColumn(col 13, col 23)(children: CastLongToDouble(col 2) -> 13:double, CastLongToDouble(col 2) -> 23:double) -> 24:double, DecimalColSubtractDecimalScalar(col 25, val -26.28)(children: DecimalScalarSubtractDecimalColumn(val -863.257, col 16)(children: CastLongToDecimal(col 2) -> 16:decimal(10,0)) -> 25:decimal(14,3)) -> 26:decimal(15,3), DoubleColUnaryMinus(col 4) -> 13:double, DoubleColMultiplyDoubleScalar(col 5, val -89010.0) -> 23:double, DoubleColDivideDoubleScalar(col 27, val 988888.0)(children: CastLongToDouble(col 0) -> 27:double) -> 28:double, LongColUnaryMinus(col 0) -> 19:long, DecimalScalarDivideDecimalColumn(val 79.553, col 29)(children: CastLongToDecimal(col 0) -> 29:decimal(3,0)) -> 30:decimal(20,18)
                       Statistics: Num rows: 9898 Data size: 5632662 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: decimal(14,3)), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: decimal(15,3)), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: decimal(20,18))
                         sort order: +++++++++++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 9898 Data size: 5632662 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: boolean), KEY.reducesinkkey5 (type: tinyint), KEY.reducesinkkey6 (type: float), KEY.reducesinkkey7 (type: timestamp), KEY.reducesinkkey8 (type: smallint), KEY.reducesinkkey9 (type: bigint), KEY.reducesinkkey10 (type: bigint), KEY.reducesinkkey11 (type: int), KEY.reducesinkkey12 (type: decimal(14,3)), KEY.reducesinkkey13 (type: smallint), KEY.reducesinkkey14 (type: smallint), KEY.reducesinkkey15 (type: smallint), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: decimal(15,3)), KEY.reducesinkkey18 (type: float), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: double), KEY.reducesinkkey21 (type: tinyint), KEY.reducesinkkey22 (type: decimal(20,18))
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                 Statistics: Num rows: 9898 Data size: 5632662 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 50
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 50 Data size: 28540 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 28540 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1314,8 +1118,7 @@ PREHOOK: query: -- TargetTypeClasses: Long, String, Double, Bool, Timestamp
 -- ArithmeticOps: Divide, Remainder, Subtract, Multiply
 -- FilterOps: Equal, LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cbigint,
        cstring1,
        cboolean1,
@@ -1355,8 +1158,7 @@ POSTHOOK: query: -- TargetTypeClasses: Long, String, Double, Bool, Timestamp
 -- ArithmeticOps: Divide, Remainder, Subtract, Multiply
 -- FilterOps: Equal, LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT cint,
+EXPLAIN SELECT cint,
        cbigint,
        cstring1,
        cboolean1,
@@ -1391,10 +1193,6 @@ WHERE  (((197 > ctinyint)
 ORDER BY cint, cbigint, cstring1, cboolean1, cfloat, cdouble, ctimestamp2, csmallint, cstring2, cboolean2, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15
 LIMIT 25
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1412,74 +1210,32 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarGreaterLongColumn(val 197, col 0)(children: col 0) -> boolean, FilterLongColEqualLongColumn(col 2, col 3)(children: col 2) -> boolean) -> boolean, FilterLongColEqualLongScalar(col 3, val 359) -> boolean, FilterLongColLessLongScalar(col 10, val 0) -> boolean, FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 6, pattern %ss) -> boolean, FilterDoubleColLessEqualDoubleColumn(col 4, col 12)(children: CastLongToFloatViaLongToDouble(col 0) -> 12:double) -> boolean) -> boolean) -> boolean
                     predicate: (((197 > UDFToInteger(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 8195 Data size: 1735170 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (CAST( cbigint AS decimal(19,0)) % 79.553) (type: decimal(5,3)), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % cfloat) (type: float), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 3, 6, 10, 4, 5, 9, 1, 7, 11, 14, 16, 12, 13, 17, 19, 18, 21, 20, 22, 23, 26, 27, 24, 28]
-                          selectExpressions: DoubleColDivideDoubleColumn(col 12, col 13)(children: CastLongToDouble(col 2) -> 12:double, CastLongToDouble(col 3) -> 13:double) -> 14:double, DecimalColModuloDecimalScalar(col 15, val 79.553)(children: CastLongToDecimal(col 3) -> 15:decimal(19,0)) -> 16:decimal(5,3), DoubleColUnaryMinus(col 17)(children: DoubleColDivideDoubleColumn(col 12, col 13)(children: CastLongToDouble(col 2) -> 12:double, CastLongToDouble(col 3) -> 13:double) -> 17:double) -> 12:double, DoubleScalarModuloDoubleColumn(val 10.175000190734863, col 4) -> 13:double, DoubleColUnaryMinus(col 4) -> 17:double, DoubleColSubtractDoubleColumn(col 4, col 18)(children: DoubleColUnaryMinus(col 4) -> 18:double) -> 19:double, DoubleColModuloDoubleScalar(col 20, val -6432.0)(children: DoubleColSubtractDoubleColumn(col 4, col 18)(children: DoubleColUnaryMinus(col 4) -> 18:double) -> 20:double) -> 18:double, DoubleColMultiplyDoubleColumn(col 5, col 20)(children: CastLongToDouble(col 1) -> 20:double) -> 21:double, DoubleColUnaryMinus(col 5) -> 20:double, LongColUnaryMinus(col 3) -> 22:long, DoubleColSubtractDoubleColumn(col 4, col 25)(children: col 4, DoubleColDivideDoubleColumn(col 23, col 24)(children: CastLongToDouble(col 2) -> 23:double, CastLongToDouble(col 3) -> 24:double) -> 25:double) -> 23:double, LongColUnaryMinus(col 1) -> 26:long, LongScalarModuloLongColumn(val 3569, col 3) -> 27:long, DoubleScalarSubtractDoubleColumn(val 359.0, col 5) -> 24:double, LongColUnaryMinus(col 1) -> 28:long
                       Statistics: Num rows: 8195 Data size: 3349694 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: decimal(5,3)), _col12 (type: double), _col13 (type: float), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                         sort order: +++++++++++++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 8195 Data size: 3349694 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: boolean), KEY.reducesinkkey4 (type: float), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: timestamp), KEY.reducesinkkey7 (type: smallint), KEY.reducesinkkey8 (type: string), KEY.reducesinkkey9 (type: boolean), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: decimal(5,3)), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey13 (type: float), KEY.reducesinkkey14 (type: float), KEY.reducesinkkey15 (type: float), KEY.reducesinkkey16 (type: float), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: double), KEY.reducesinkkey19 (type: bigint), KEY.reducesinkkey20 (type: double), KEY.reducesinkkey21 (type: smallint), KEY.reducesinkkey22 (type: bigint), KEY.reducesinkkey23 (type: double), KEY.reducesinkkey21 (type: smallint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 21]
                 Statistics: Num rows: 8195 Data size: 3349694 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 25
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 25 Data size: 10520 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 25 Data size: 10520 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1574,8 +1330,7 @@ PREHOOK: query: -- TargetTypeClasses: String, Bool, Double, Long, Timestamp
 -- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder
 -- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cint,
+EXPLAIN SELECT   cint,
          cstring1,
          cboolean2,
          ctimestamp2,
@@ -1614,8 +1369,7 @@ POSTHOOK: query: -- TargetTypeClasses: String, Bool, Double, Long, Timestamp
 -- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder
 -- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cint,
+EXPLAIN SELECT   cint,
          cstring1,
          cboolean2,
          ctimestamp2,
@@ -1649,10 +1403,6 @@ WHERE    (((csmallint > -26.28)
 ORDER BY cboolean1, cstring1, ctimestamp2, cfloat, cbigint, cstring1, cdouble, cint, csmallint, cdouble, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13
 LIMIT 75
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1670,75 +1420,33 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDecimalColGreaterDecimalScalar(col 12, val -26.28)(children: CastLongToDecimal(col 1) -> 12:decimal(7,2)) -> boolean, FilterStringColLikeStringScalar(col 7, pattern ss) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 5, col 13)(children: CastLongToDouble(col 3) -> 13:double) -> boolean, FilterStringGroupColGreaterEqualStringScalar(col 6, val ss) -> boolean, FilterDoubleColNotEqualDoubleColumn(col 13, col 5)(children: CastLongToDouble(col 2) -> 13:double) -> boolean) -> boolean, FilterLongColEqualLongScalar(col 0, val -89010)(children: col 0) -> boolean, FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13, col 4)(children: CastLongToFloatViaLongToDouble(col 3) -> 13:double) -> boolean, FilterDecimalScalarLessEqualDecimalColumn(val -26.28, col 12)(children: CastLongToDecimal(col 1) -> 12:decimal(7,2)) -> boolean) -> boolean) -> boolean
                     predicate: (((CAST( csmallint AS decimal(7,2)) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= CAST( csmallint AS decimal(7,2))))) (type: boolean)
                     Statistics: Num rows: 10922 Data size: 2312410 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / CAST( ctinyint AS decimal(3,0))) (type: decimal(19,18)), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [2, 6, 11, 9, 5, 4, 3, 1, 10, 14, 15, 16, 13, 18, 19, 20, 22, 25, 27, 24, 17, 28]
-                          selectExpressions: LongColAddLongColumn(col 2, col 1)(children: col 1) -> 14:long, LongColSubtractLongColumn(col 3, col 0)(children: col 0) -> 15:long, LongColUnaryMinus(col 3) -> 16:long, DoubleColUnaryMinus(col 4) -> 13:double, LongColAddLongColumn(col 17, col 3)(children: LongColSubtractLongColumn(col 3, col 0)(children: col 0) -> 17:long) -> 18:long, DoubleColDivideDoubleColumn(col 5, col 5) -> 19:double, DoubleColUnaryMinus(col 5) -> 20:double, LongColMultiplyLongColumn(col 17, col 21)(children: col 17, LongColUnaryMinus(col 3) -> 21:long) -> 22:long, DoubleColAddDoubleColumn(col 23, col 24)(children: DoubleColUnaryMinus(col 5) -> 23:double, CastLongToDouble(col 3) -> 24:double) -> 25:double, DecimalScalarDivideDecimalColumn(val -1.389, col 26)(children: CastLongToDecimal(col 0) -> 26:decimal(3,0)) -> 27:decimal(19,18), DoubleColModuloDoubleColumn(col 23, col 5)(children: CastLongToDouble(col 3) -> 23:double) -> 24:double, LongColUnaryMinus(col 1) -> 17:long, LongColAddLongColumn(col 1, col 21)(children: col 1, LongColAddLongColumn(col 2, col 1)(children: col 1) -> 21:long) -> 28:long
                       Statistics: Num rows: 10922 Data size: 3594034 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: decimal(19,18)), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                         sort order: +++++++++++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 10922 Data size: 3594034 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey7 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: boolean), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: bigint), KEY.reducesinkkey8 (type: smallint), KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey10 (type: int), KEY.reducesinkkey11 (type: bigint), KEY.reducesinkkey12 (type: bigint), KEY.reducesinkkey13 (type: float), KEY.reducesinkkey14 (type: bigint), KEY.reducesinkkey15 (type: double), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: bigint), KEY.reducesinkkey18 (type: double), KEY.reducesinkkey19 (type: decimal(19,18)), KEY.reducesinkkey20 (type: double), KEY.reducesinkkey21 (type: smallint), KEY.reducesinkkey22 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [7, 1, 23, 2, 6, 3, 4, 8, 0, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
                 Statistics: Num rows: 10922 Data size: 3594034 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 75
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 75 Data size: 24810 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 75 Data size: 24810 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1904,8 +1612,7 @@ PREHOOK: query: -- TargetTypeClasses: Long, String, Double, Timestamp
 -- ArithmeticOps: Divide, Subtract, Multiply, Remainder
 -- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring2,
          cdouble,
          cfloat,
@@ -1937,8 +1644,7 @@ POSTHOOK: query: -- TargetTypeClasses: Long, String, Double, Timestamp
 -- ArithmeticOps: Divide, Subtract, Multiply, Remainder
 -- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like
 -- GroupBy: NoGroupByProjectColumns
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring2,
          cdouble,
          cfloat,
@@ -1965,10 +1671,6 @@ WHERE    (((-1.389 >= cint)
 ORDER BY csmallint, cstring2, cdouble, cfloat, cbigint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10
 LIMIT 45
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1986,75 +1688,33 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2528254 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDecimalScalarGreaterEqualDecimalColumn(val -1.389, col 12)(children: CastLongToDecimal(col 2) -> 12:decimal(13,3)) -> boolean, FilterLongColLessLongColumn(col 1, col 0)(children: col 0) -> boolean, FilterLongScalarGreaterLongColumn(val -6432, col 1)(children: col 1) -> boolean) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleColumn(col 5, col 4)(children: col 4) -> boolean, FilterStringGroupColLessEqualStringScalar(col 7, val a) -> boolean) -> boolean, FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 6, pattern ss%) -> boolean, FilterDecimalScalarGreaterDecimalColumn(val 10.175, col 13)(children: CastLongToDecimal(col 3) -> 13:decimal(22,3)) -> boolean) -> boolean) -> boolean
                     predicate: (((-1.389 >= CAST( cint AS decimal(13,3))) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > CAST( cbigint AS decimal(22,3))))) (type: boolean)
                     Statistics: Num rows: 3868 Data size: 795962 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [8, 7, 5, 4, 3, 1, 15, 16, 14, 17, 18, 20, 19, 21, 22, 24]
-                          selectExpressions: DoubleColDivideDoubleScalar(col 14, val 3569.0)(children: CastLongToDouble(col 3) -> 14:double) -> 15:double, LongScalarSubtractLongColumn(val -257, col 1)(children: col 1) -> 16:long, DoubleScalarMultiplyDoubleColumn(val -6432.0, col 4) -> 14:double, DoubleColUnaryMinus(col 5) -> 17:double, DoubleColMultiplyDoubleScalar(col 5, val 10.175) -> 18:double, DoubleColDivideDoubleColumn(col 19, col 4)(children: col 19, col 4) -> 20:double, DoubleColUnaryMinus(col 4) -> 19:double, LongColModuloLongColumn(col 2, col 1)(children: col 1) -> 21:long, DoubleColUnaryMinus(col 5) -> 22:double, DoubleColMultiplyDoubleColumn(col 5, col 23)(children: DoubleColUnaryMinus(col 5) -> 23:double) -> 24:double
                       Statistics: Num rows: 3868 Data size: 748844 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                         sort order: +++++++++++++++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                         Statistics: Num rows: 3868 Data size: 748844 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: timestamp), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: bigint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: int), KEY.reducesinkkey7 (type: float), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: float), KEY.reducesinkkey12 (type: int), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey14 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [15, 1, 2, 3, 4, 0, 5, 6, 7, 8, 9, 10, 11, 12, 8, 14]
                 Statistics: Num rows: 3868 Data size: 748844 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 45
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 45 Data size: 8880 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 45 Data size: 8880 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2175,8 +1835,7 @@ PREHOOK: query: -- TargetTypeClasses: Double, String, Long
 -- ArithmeticOps: Remainder, Divide, Subtract
 -- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   csmallint,
+EXPLAIN SELECT   csmallint,
          (csmallint % -75) as c1,
          STDDEV_SAMP(csmallint) as c2,
          (-1.389 / csmallint) as c3,
@@ -2201,8 +1860,7 @@ POSTHOOK: query: -- TargetTypeClasses: Double, String, Long
 -- ArithmeticOps: Remainder, Divide, Subtract
 -- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   csmallint,
+EXPLAIN SELECT   csmallint,
          (csmallint % -75) as c1,
          STDDEV_SAMP(csmallint) as c2,
          (-1.389 / csmallint) as c3,
@@ -2222,10 +1880,6 @@ GROUP BY csmallint
 ORDER BY csmallint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10
 LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2244,34 +1898,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 256884 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 1, val -257)(children: col 1) -> boolean, FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val -6432, col 1)(children: col 1) -> boolean, FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleColumn(col 12, col 5)(children: CastLongToDouble(col 2) -> 12:double) -> boolean, FilterLongColLessEqualLongColumn(col 0, col 2)(children: col 0) -> boolean) -> boolean) -> boolean) -> boolean
                     predicate: ((UDFToInteger(csmallint) >= -257) and ((-6432 = UDFToInteger(csmallint)) or ((UDFToDouble(cint) >= cdouble) and (UDFToInteger(ctinyint) <= cint)))) (type: boolean)
                     Statistics: Num rows: 2503 Data size: 52344 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: csmallint (type: smallint), cbigint (type: bigint), ctinyint (type: tinyint)
                       outputColumnNames: csmallint, cbigint, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [1, 3, 0]
                       Statistics: Num rows: 2503 Data size: 52344 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: stddev_samp(csmallint), sum(cbigint), var_pop(ctinyint), count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFSumLong(col 3) -> bigint, VectorUDAFVarPopLong(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            keyExpressions: col 1
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFStdSampLong(col 1) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopLong(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         keys: csmallint (type: smallint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
@@ -2284,21 +1919,8 @@ STAGE PLANS:
                           value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF stddev_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_samp(VALUE._col0), sum(VALUE._col1), var_pop(VALUE._col2), count(VALUE._col3)
@@ -2317,33 +1939,16 @@ STAGE PLANS:
                     TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: decimal(19,18)), KEY.reducesinkkey4 (type: bigint), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: int), KEY.reducesinkkey7 (type: double), KEY.reducesinkkey8 (type: int), KEY.reducesinkkey9 (type: bigint), KEY.reducesinkkey10 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                 Statistics: Num rows: 1128 Data size: 197388 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 20 Data size: 3504 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 20 Data size: 3504 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2426,8 +2031,7 @@ PREHOOK: query: -- TargetTypeClasses: Long, Double, Timestamp
 -- ArithmeticOps: Multiply, Add, Subtract, Remainder
 -- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cdouble,
+EXPLAIN SELECT   cdouble,
          VAR_SAMP(cdouble),
          (2563.58 * VAR_SAMP(cdouble)),
          (-(VAR_SAMP(cdouble))),
@@ -2459,8 +2063,7 @@ POSTHOOK: query: -- TargetTypeClasses: Long, Double, Timestamp
 -- ArithmeticOps: Multiply, Add, Subtract, Remainder
 -- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   cdouble,
+EXPLAIN SELECT   cdouble,
          VAR_SAMP(cdouble),
          (2563.58 * VAR_SAMP(cdouble)),
          (-(VAR_SAMP(cdouble))),
@@ -2487,10 +2090,6 @@ WHERE    (((cdouble > 2563.58))
 GROUP BY cdouble
 ORDER BY cdouble
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2509,34 +2108,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 293580 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5, val 2563.58) -> boolean, FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColGreaterEqualLongColumn(col 3, col 2)(children: col 2) -> boolean, FilterLongColLessLongColumn(col 1, col 2)(children: col 1) -> boolean, FilterDoubleColLessDoubleScalar(col 4, val -5638.14990234375) -> boolean) -> boolean, FilterDecimalScalarEqualDecimalColumn(val 2563.58, col 12)(children: CastLongToDecimal(col 0) -> 12:decimal(6,2)) -> boolean, FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 5, col 13)(children: CastLongToDouble(col 3) -> 13:double) -> boolean, FilterDecimalScalarGreaterDecimalColumn(val -5638.15, col 14)(children: CastLongToDecimal(col 3) -> 14:decimal(21,2)) -> boolean) -> boolean) -> boolean) -> boolean
                     predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (cfloat < -5638.15)) or (2563.58 = CAST( ctinyint AS decimal(6,2))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > CAST( cbigint AS decimal(21,2)))))) (type: boolean)
                     Statistics: Num rows: 2503 Data size: 59820 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
                       outputColumnNames: cdouble, cfloat
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [5, 4]
                       Statistics: Num rows: 2503 Data size: 59820 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: var_samp(cdouble), count(cfloat), sum(cfloat), var_pop(cdouble), stddev_pop(cdouble), sum(cdouble)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFVarSampDouble(col 5) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFCount(col 4) -> bigint, VectorUDAFSumDouble(col 4) -> double, VectorUDAFVarPopDouble(col 5) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopDouble(col 5) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFSumDouble(col 5) -> double
-                            className: VectorGroupByOperator
-                            vectorOutput: false
-                            keyExpressions: col 5
-                            native: false
-                            projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                            vectorOutputConditionsNotMet: Vector output of VectorUDAFVarSampDouble(col 5) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopDouble(col 5) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopDouble(col 5) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                         keys: cdouble (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
@@ -2549,21 +2129,8 @@ STAGE PLANS:
                           value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF var_samp parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,variance:double> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: var_samp(VALUE._col0), count(VALUE._col1), sum(VALUE._col2), var_pop(VALUE._col3), stddev_pop(VALUE._col4), sum(VALUE._col5)
@@ -2582,27 +2149,13 @@ STAGE PLANS:
                     value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
         Reducer 3 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13]
                 Statistics: Num rows: 870 Data size: 109608 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 870 Data size: 109608 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2679,8 +2232,7 @@ PREHOOK: query: -- TargetTypeClasses: Bool, Timestamp, String, Double, Long
 -- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder
 -- FilterOps: NotEqual, LessThan, Like, Equal, RLike
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring1,
          STDDEV_POP(cint) as c1,
          (STDDEV_POP(cint) * 10.175) as c2,
@@ -2740,8 +2292,7 @@ POSTHOOK: query: -- TargetTypeClasses: Bool, Timestamp, String, Double, Long
 -- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder
 -- FilterOps: NotEqual, LessThan, Like, Equal, RLike
 -- GroupBy: GroupBy
-EXPLAIN VECTORIZATION EXPRESSION
-SELECT   ctimestamp1,
+EXPLAIN SELECT   ctimestamp1,
          cstring1,
          STDDEV_POP(cint) as c1,
          (STDDEV_POP(cint) * 10.175) as c2,
@@ -2796,10 +2347,6 @@ GROUP BY ctimestamp1, cstring1
 ORDER BY ctimestamp1, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37
 LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -2818,34 +2365,15 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3019778 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
                   Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterDoubleColNotEqualDoubleScalar(col 12, val 0.0)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean, FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -257, col 0)(children: col 0) -> boolean, SelectColumnIsNotNull(col 11) -> boolean, FilterStringColRegExpStringScalar(col 6, pattern .*ss) -> boolean, FilterDoubleScalarLessDoubleColumn(val -3.0, col 12)(children: CastTimestampToDouble(col 8) -> 12:double) -> boolean) -> boolean, FilterDoubleColEqualDoubleScalar(col 12, val -5.0)(childr

<TRUNCATED>
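
For context on what these q-file diffs are doing: the revert swaps every EXPLAIN VECTORIZATION EXPRESSION back to plain EXPLAIN and drops the per-operator vectorization annotations (TableScan/Select/Filter/Group By/Reduce Sink Vectorization blocks) from the golden output. A minimal sketch of how that annotated output is produced follows; the SET statements are assumptions inferred from the enabledConditionsMet lines removed above, and the query is a trimmed-down, illustration-only variant of the csmallint query in vectorization_0.q.out:

    -- assumed session settings, per the removed enabledConditionsMet entries
    SET hive.vectorized.execution.enabled = true;
    SET hive.vectorized.execution.reduce.enabled = true;
    -- EXPRESSION is the detail level used throughout these q-files
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT csmallint, STDDEV_SAMP(csmallint)
    FROM alltypesorc
    GROUP BY csmallint;
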

[07/62] [partial] hive git commit: Revert "Revert "Revert "HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)"""

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
index 636463b..ceaac4f 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
@@ -19,10 +19,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@test
 POSTHOOK: Lineage: test.ts EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT ts FROM test
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT ts FROM test
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
@@ -48,10 +48,10 @@ POSTHOOK: Input: default@test
 #### A masked pattern was here ####
 0001-01-01 00:00:00
 9999-12-31 23:59:59.999999999
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
@@ -87,10 +87,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test
 #### A masked pattern was here ####
 0001-01-01 00:00:00	9999-12-31 23:59:59.999999999	3652060 23:59:59.999999999
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT ts FROM test
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT ts FROM test
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
@@ -116,10 +116,10 @@ POSTHOOK: Input: default@test
 #### A masked pattern was here ####
 0001-01-01 00:00:00
 9999-12-31 23:59:59.999999999
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test
 POSTHOOK: type: QUERY
 Plan optimized by CBO.
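
One detail in the vectorized_timestamp.q.out results above: MAX(ts) - MIN(ts) prints as 3652060 23:59:59.999999999 because subtracting two timestamps in Hive yields a day-to-second interval, rendered as days followed by hh:mm:ss.nnnnnnnnn. A standalone check of the same arithmetic (hypothetical, not taken from the q-file):

    -- hypothetical check; the two literals are the boundary rows loaded into test
    SELECT CAST('9999-12-31 23:59:59.999999999' AS timestamp)
         - CAST('0001-01-01 00:00:00' AS timestamp);
    -- expected, per the result shown above: 3652060 23:59:59.999999999
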

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
index ae59b06..4092911 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
@@ -73,7 +73,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@alltypesorc_wrong
 POSTHOOK: Lineage: alltypesorc_wrong.stimestamp1 SIMPLE []
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) AS c1,
   year(ctimestamp1),
   month(ctimestamp1),
@@ -86,7 +86,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) AS c1,
   year(ctimestamp1),
   month(ctimestamp1),
@@ -99,10 +99,6 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -120,61 +116,26 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_string
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: to_unix_timestamp(ctimestamp1) (type: bigint), year(ctimestamp1) (type: int), month(ctimestamp1) (type: int), day(ctimestamp1) (type: int), dayofmonth(ctimestamp1) (type: int), weekofyear(ctimestamp1) (type: int), hour(ctimestamp1) (type: int), minute(ctimestamp1) (type: int), second(ctimestamp1) (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 9, 10]
-                        selectExpressions: VectorUDFUnixTimeStampTimestamp(col 0) -> 2:long, VectorUDFYearTimestamp(col 0, field YEAR) -> 3:long, VectorUDFMonthTimestamp(col 0, field MONTH) -> 4:long, VectorUDFDayOfMonthTimestamp(col 0, field DAY_OF_MONTH) -> 5:long, VectorUDFDayOfMonthTimestamp(col 0, field DAY_OF_MONTH) -> 6:long, VectorUDFWeekOfYearTimestamp(col 0, field WEEK_OF_YEAR) -> 7:long, VectorUDFHourTimestamp(col 0, field HOUR_OF_DAY) -> 8:long, VectorUDFMinuteTimestamp(col 0, field MINUTE) -> 9:long, VectorUDFSecondTimestamp(col 0, field SECOND) -> 10:long
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -257,7 +218,7 @@ NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -270,7 +231,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -283,10 +244,6 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -304,61 +261,26 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_string
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) (type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [2, 3, 4, 5, 6, 7, 8, 9, 10]
-                        selectExpressions: VectorUDFUnixTimeStampString(col 1) -> 2:long, VectorUDFYearString(col 1, fieldStart 0, fieldLength 4) -> 3:long, VectorUDFMonthString(col 1, fieldStart 5, fieldLength 2) -> 4:long, VectorUDFDayOfMonthString(col 1, fieldStart 8, fieldLength 2) -> 5:long, VectorUDFDayOfMonthString(col 1, fieldStart 8, fieldLength 2) -> 6:long, VectorUDFWeekOfYearString(col 1) -> 7:long, VectorUDFHourString(col 1, fieldStart 11, fieldLength 2) -> 8:long, VectorUDFMinuteString(col 1, fieldStart 14, fieldLength 2) -> 9:long, VectorUDFSecondString(col 1, fieldStart 17, fieldLength 2) -> 10:long
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -441,7 +363,7 @@ NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1,
   year(ctimestamp1) = year(stimestamp1),
   month(ctimestamp1) = month(stimestamp1),
@@ -454,7 +376,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1,
   year(ctimestamp1) = year(stimestamp1),
   month(ctimestamp1) = month(stimestamp1),
@@ -467,10 +389,6 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_string
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -488,61 +406,26 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_string
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: (to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1)) (type: boolean), (year(ctimestamp1) = year(stimestamp1)) (type: boolean), (month(ctimestamp1) = month(stimestamp1)) (type: boolean), (day(ctimestamp1) = day(stimestamp1)) (type: boolean), (dayofmonth(ctimestamp1) = dayofmonth(stimestamp1)) (type: boolean), (weekofyear(ctimestamp1) = weekofyear(stimestamp1)) (type: boolean), (hour(ctimestamp1) = hour(stimestamp1)) (type: boolean), (minute(ctimestamp1) = minute(stimestamp1)) (type: boolean), (second(ctimestamp1) = second(stimestamp1)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [4, 5, 6, 7, 8, 9, 10, 11, 12]
-                        selectExpressions: LongColEqualLongColumn(col 2, col 3)(children: VectorUDFUnixTimeStampTimestamp(col 0) -> 2:long, VectorUDFUnixTimeStampString(col 1) -> 3:long) -> 4:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFYearTimestamp(col 0, field YEAR) -> 2:long, VectorUDFYearString(col 1, fieldStart 0, fieldLength 4) -> 3:long) -> 5:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFMonthTimestamp(col 0, field MONTH) -> 2:long, VectorUDFMonthString(col 1, fieldStart 5, fieldLength 2) -> 3:long) -> 6:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFDayOfMonthTimestamp(col 0, field DAY_OF_MONTH) -> 2:long, VectorUDFDayOfMonthString(col 1, fieldStart 8, fieldLength 2) -> 3:long) -> 7:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFDayOfMonthTimestamp(col 0, field DAY_OF_MONTH) -> 2:long, VectorUDFDayOfMonthString(col 1, fieldStart 8, fieldLength 2) -> 3:long) -> 8:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFWeekOfYearTimestamp(col 0, field WEEK_OF_YEAR) -> 2:long, VectorUDFWeekOfYearString(col 1) -> 3:long) -> 9:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFHourTimestamp(col 0, field HOUR_OF_DAY) -> 2:long, VectorUDFHourString(col 1, fieldStart 11, fieldLength 2) -> 3:long) -> 10:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFMinuteTimestamp(col 0, field MINUTE) -> 2:long, VectorUDFMinuteString(col 1, fieldStart 14, fieldLength 2) -> 3:long) -> 11:long, LongColEqualLongColumn(col 2, col 3)(children: VectorUDFSecondTimestamp(col 0, field SECOND) -> 2:long, VectorUDFSecondString(col 1, fieldStart 17, fieldLength 2) -> 3:long) -> 12:long
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: boolean)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -628,7 +511,7 @@ NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 PREHOOK: query: -- Wrong format. Should all be NULL.
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -642,7 +525,7 @@ FROM alltypesorc_wrong
 ORDER BY c1
 PREHOOK: type: QUERY
 POSTHOOK: query: -- Wrong format. Should all be NULL.
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   to_unix_timestamp(stimestamp1) AS c1,
   year(stimestamp1),
   month(stimestamp1),
@@ -655,10 +538,6 @@ EXPLAIN VECTORIZATION EXPRESSION  SELECT
 FROM alltypesorc_wrong
 ORDER BY c1
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -676,61 +555,26 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_wrong
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0]
                   Select Operator
                     expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) (type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7, 8, 9]
-                        selectExpressions: VectorUDFUnixTimeStampString(col 0) -> 1:long, VectorUDFYearString(col 0, fieldStart 0, fieldLength 4) -> 2:long, VectorUDFMonthString(col 0, fieldStart 5, fieldLength 2) -> 3:long, VectorUDFDayOfMonthString(col 0, fieldStart 8, fieldLength 2) -> 4:long, VectorUDFDayOfMonthString(col 0, fieldStart 8, fieldLength 2) -> 5:long, VectorUDFWeekOfYearString(col 0) -> 6:long, VectorUDFHourString(col 0, fieldStart 11, fieldLength 2) -> 7:long, VectorUDFMinuteString(col 0, fieldStart 14, fieldLength 2) -> 8:long, VectorUDFSecondString(col 0, fieldStart 17, fieldLength 2) -> 9:long
                     Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
                 Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -776,24 +620,20 @@ POSTHOOK: Input: default@alltypesorc_wrong
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   min(ctimestamp1),
   max(ctimestamp1),
   count(ctimestamp1),
   count(*)
 FROM alltypesorc_string
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   min(ctimestamp1),
   max(ctimestamp1),
   count(ctimestamp1),
   count(*)
 FROM alltypesorc_string
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -811,73 +651,31 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_string
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
                     outputColumnNames: ctimestamp1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count()
-                      Group By Vectorization:
-                          aggregators: VectorUDAFMinTimestamp(col 0) -> timestamp, VectorUDAFMaxTimestamp(col 0) -> timestamp, VectorUDAFCount(col 0) -> bigint, VectorUDAFCountStar(*) -> bigint
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: Uniform Hash IS false
                         Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFMinTimestamp(col 0) -> timestamp, VectorUDAFMaxTimestamp(col 1) -> timestamp, VectorUDAFCountMerge(col 2) -> bigint, VectorUDAFCountMerge(col 3) -> bigint
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
                   Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -910,19 +708,15 @@ POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
 NULL	NULL	0	40
 PREHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)...
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   round(sum(ctimestamp1), 3)
 FROM alltypesorc_string
 PREHOOK: type: QUERY
 POSTHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)...
-EXPLAIN VECTORIZATION EXPRESSION  SELECT
+EXPLAIN SELECT
   round(sum(ctimestamp1), 3)
 FROM alltypesorc_string
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -955,47 +749,20 @@ STAGE PLANS:
                         value expressions: _col0 (type: double)
             Execution mode: llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Aggregation Function expression for GROUPBY operator: Vectorization of aggreation should have succeeded org.apache.hadoop.hive.ql.metadata.HiveException: Vector aggregate not implemented: "sum" for type: "TIMESTAMP (UDAF evaluator mode = PARTIAL1)
-                vectorized: false
         Reducer 2 
             Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumDouble(col 0) -> double
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    native: false
-                    projectedOutputColumns: [0]
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: round(_col0, 3) (type: double)
                   outputColumnNames: _col0
-                  Select Vectorization:
-                      className: VectorSelectOperator
-                      native: true
-                      projectedOutputColumns: [1]
-                      selectExpressions: RoundWithNumDigitsDoubleToDouble(col 0, decimalPlaces 3) -> 1:double
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
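
The notVectorizedReason in the Map Vectorization block above is the HIVE-8211 limitation the test comment refers to: there is no vector SUM over TIMESTAMP, because the sum would have to produce a double rather than a long. A hedged workaround sketch, assuming the seconds-since-epoch semantics of an explicit timestamp-to-double cast are acceptable to the caller:

  -- aggregate over DOUBLE instead, which does have a vectorized SUM
  SELECT round(sum(cast(ctimestamp1 AS double)), 3)
  FROM alltypesorc_string;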
@@ -1021,7 +788,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+PREHOOK: query: EXPLAIN SELECT
   round(avg(ctimestamp1), 0),
   variance(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,
   var_pop(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,
@@ -1032,7 +799,7 @@ PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   round(stddev_samp(ctimestamp1), 3)
 FROM alltypesorc_string
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
+POSTHOOK: query: EXPLAIN SELECT
   round(avg(ctimestamp1), 0),
   variance(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,
   var_pop(ctimestamp1) between 8.97077295279421E19 and 8.97077295279422E19,
@@ -1043,10 +810,6 @@ POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT
   round(stddev_samp(ctimestamp1), 3)
 FROM alltypesorc_string
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1064,26 +827,12 @@ STAGE PLANS:
                 TableScan
                   alias: alltypesorc_string
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
                     outputColumnNames: ctimestamp1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0]
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgTimestamp(col 0) -> struct<count:bigint,sum:double>, VectorUDAFVarPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFVarSampTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>, VectorUDAFStdSampTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double>
-                          className: VectorGroupByOperator
-                          vectorOutput: false
-                          native: false
-                          projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                          vectorOutputConditionsNotMet: Vector output of VectorUDAFAvgTimestamp(col 0) -> struct<count:bigint,sum:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFVarSampTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdPopTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false, Vector output of VectorUDAFStdSampTimestamp(col 0) -> struct<count:bigint,sum:double,variance:double> output type STRUCT requires PRIMITIVE IS false
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                       Statistics: Num rows: 1 Data size: 672 Basic stats: COMPLETE Column stats: NONE
@@ -1093,21 +842,8 @@ STAGE PLANS:
                         value expressions: _col0 (type: struct<count:bigint,sum:double,input:timestamp>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: false
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: Aggregation Function UDF avg parameter expression for GROUPBY operator: Data type struct<count:bigint,sum:double,input:timestamp> of Column[VALUE._col0] not supported
-                vectorized: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7)
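
The vectorOutputConditionsNotMet and value expressions above show why vector output was refused for this GROUP BY: the variance-family partials are STRUCTs of (count, sum, variance), not primitives. For reference, those three fields are what the standard pairwise merge of partial variances needs (the textbook formula of Chan et al., not quoted from Hive source), with n the count, s the sum, and M_2 the accumulated sum of squared deviations:

  \delta = \frac{s_b}{n_b} - \frac{s_a}{n_a}, \qquad
  M_2 = M_{2,a} + M_{2,b} + \delta^2 \, \frac{n_a n_b}{n_a + n_b}

after the final merge, var_pop = M_2 / n and var_samp = M_2 / (n - 1).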

http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
index 2a142a0..1e74446 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)
@@ -16,7 +16,7 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)
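
In the '+' side of the hunk below, the Tez vertex disappears entirely and the query runs as a bare Fetch Operator ending in a ListSink. That plan shape typically comes from Hive's simple-query fetch conversion rather than from vectorization itself; a sketch of the long-standing knob that controls it (the setting is not part of this diff):

  set hive.fetch.task.conversion=more;  -- simple SELECT/filter queries run as a client-side fetch, no Tez/Spark job
  set hive.fetch.task.conversion=none;  -- force a full execution plan instead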
@@ -34,69 +34,22 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 1684250 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColEqualLongScalar(col 12, val 0)(children: LongColModuloLongScalar(col 3, val 250) -> 12:long) -> boolean
-                    predicate: ((cbigint % 250) = 0) (type: boolean)
-                    Statistics: Num rows: 6144 Data size: 842180 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [14, 16, 18, 20, 21, 22, 24, 26, 8, 27, 29]
-                          selectExpressions: CastMillisecondsLongToTimestamp(col 0) -> 14:timestamp, CastMillisecondsLongToTimestamp(col 1) -> 16:timestamp, CastMillisecondsLongToTimestamp(col 2) -> 18:timestamp, CastMillisecondsLongToTimestamp(col 3) -> 20:timestamp, CastDoubleToTimestamp(col 4) -> 21:timestamp, CastDoubleToTimestamp(col 5) -> 22:timestamp, CastMillisecondsLongToTimestamp(col 10) -> 24:timestamp, CastMillisecondsLongToTimestamp(col 12)(children: LongColMultiplyLongScalar(col 3, val 0) -> 12:long) -> 26:timestamp, VectorUDFAdaptor(CAST( cstring1 AS TIMESTAMP)) -> 27:Timestamp, VectorUDFAdaptor(CAST( substr(cstring1, 1, 1) AS TIMESTAMP))(children: StringSubstrColStartLen(col 6, start 0, length 1) -> 28:string) -> 29:Timestamp
-                      Statistics: Num rows: 6144 Data size: 2703360 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 6144 Data size: 2703360 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: ((cbigint % 250) = 0) (type: boolean)
+            Select Operator
+              expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              ListSink
 
 PREHOOK: query: select
 -- to timestamp
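
Note that the plan above casts the integer columns with CastMillisecondsLongToTimestamp, while the second EXPLAIN for the same query below uses CastLongToTimestamp; the two runs presumably differ in the integer-to-timestamp conversion setting (hive.int.timestamp.conversion.in.seconds, HIVE-9917). A hedged sketch of the difference:

  set hive.int.timestamp.conversion.in.seconds=false;  -- default: integral values are taken as milliseconds
  select cast(1000 as timestamp);                      -- 1 second past the epoch

  set hive.int.timestamp.conversion.in.seconds=true;   -- integral values are taken as seconds
  select cast(1000 as timestamp);                      -- 1000 seconds past the epoch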
@@ -162,7 +115,7 @@ POSTHOOK: Input: default@alltypesorc
 1969-12-31 15:59:59.95	1969-12-31 15:59:52.804	NULL	1969-12-19 17:33:32.75	1969-12-31 15:59:10	1969-12-31 14:00:04	NULL	1969-12-31 16:00:00	1969-12-31 15:59:54.733	NULL	NULL
 1969-12-31 16:00:00.011	NULL	1969-12-30 22:03:04.018	1970-01-21 12:50:53.75	1969-12-31 16:00:11	NULL	1969-12-31 16:00:00.001	1969-12-31 16:00:00	1969-12-31 16:00:02.351	NULL	NULL
 1969-12-31 16:00:00.011	NULL	1969-12-27 18:49:09.583	1970-01-14 22:35:27	1969-12-31 16:00:11	NULL	1969-12-31 16:00:00.001	1969-12-31 16:00:00	1969-12-31 16:00:02.351	NULL	NULL
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)
@@ -180,7 +133,7 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 select
 -- to timestamp
   cast (ctinyint as timestamp)
@@ -198,69 +151,22 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 1684250 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColEqualLongScalar(col 12, val 0)(children: LongColModuloLongScalar(col 3, val 250) -> 12:long) -> boolean
-                    predicate: ((cbigint % 250) = 0) (type: boolean)
-                    Statistics: Num rows: 6144 Data size: 842180 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19, 20, 8, 21, 23]
-                          selectExpressions: CastLongToTimestamp(col 0) -> 13:timestamp, CastLongToTimestamp(col 1) -> 14:timestamp, CastLongToTimestamp(col 2) -> 15:timestamp, CastLongToTimestamp(col 3) -> 16:timestamp, CastDoubleToTimestamp(col 4) -> 17:timestamp, CastDoubleToTimestamp(col 5) -> 18:timestamp, CastLongToTimestamp(col 10) -> 19:timestamp, CastLongToTimestamp(col 12)(children: LongColMultiplyLongScalar(col 3, val 0) -> 12:long) -> 20:timestamp, VectorUDFAdaptor(CAST( cstring1 AS TIMESTAMP)) -> 21:Timestamp, VectorUDFAdaptor(CAST( substr(cstring1, 1, 1) AS TIMESTAMP))(children: StringSubstrColStartLen(col 6, start 0, length 1) -> 22:string) -> 23:Timestamp
-                      Statistics: Num rows: 6144 Data size: 2703360 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 6144 Data size: 2703360 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          Filter Operator
+            predicate: ((cbigint % 250) = 0) (type: boolean)
+            Select Operator
+              expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              ListSink
 
 PREHOOK: query: select
 -- to timestamp
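
One last observation on the plans in this file: the string-to-timestamp casts were compiled as VectorUDFAdaptor(...) expressions, i.e. row-mode UDFs wrapped to run inside the vectorized pipeline, which is why the Map Vectorization summaries reported usesVectorUDFAdaptor: true. A hedged way to surface such fallbacks rather than silently accepting them (setting and values as in HiveConf of this era):

  -- with the adaptor disallowed, expressions lacking a native vector
  -- implementation should fail vectorization visibly in the EXPLAIN output
  set hive.vectorized.adaptor.usage.mode=none;  -- none | chosen | all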