Posted to commits@hive.apache.org by mm...@apache.org on 2017/02/03 20:03:48 UTC

[23/51] [partial] hive git commit: HIVE-11394: Enhance EXPLAIN display for vectorization (Matt McCline, reviewed by Gopal Vijayaraghavan)
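
Note: the golden-file hunks below replace plain "explain" with the new "explain vectorization expression" form added by HIVE-11394, so each plan now carries PLAN VECTORIZATION, Map/Reduce Vectorization, and per-operator vectorization detail. A minimal sketch of the new syntax as it would be exercised in a q file, assuming a table t with a date column d already exists (the table and column names here are only placeholders, not part of this patch):

    set hive.vectorized.execution.enabled=true;
    explain vectorization expression
    select d, d - interval '2-2' year to month
    from t
    order by d;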

http://git-wip-us.apache.org/repos/asf/hive/blob/fcb57100/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index 391c775..ee8aa0c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -35,7 +35,7 @@ POSTHOOK: Output: default@interval_arithmetic_1
 POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION [(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE [(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, comment:null), ]
 tsval	tsval
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - interval '2-2' year to month,
@@ -47,7 +47,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - interval '2-2' year to month,
@@ -60,6 +60,10 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -77,26 +81,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 2-2) (type: date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 2, 3, 4, 5, 6, 7]
+                        selectExpressions: DateColSubtractIntervalYearMonthScalar(col 0, val 2-2) -> 2:long, DateColSubtractIntervalYearMonthScalar(col 0, val -2-2) -> 3:long, DateColAddIntervalYearMonthScalar(col 0, val 2-2) -> 4:long, DateColAddIntervalYearMonthScalar(col 0, val -2-2) -> 5:long, IntervalYearMonthScalarAddDateColumn(val -2-2, col 0) -> 6:long, IntervalYearMonthScalarAddDateColumn(val 2-2, col 0) -> 7:long
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: date), VALUE._col4 (type: date), VALUE._col5 (type: date)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -186,7 +225,7 @@ dateval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13	9073-04-13	9077-08-13	9077-08-13	9073-04-13	9073-04-13	9077-08-13
 9209-11-11	9207-09-11	9212-01-11	9212-01-11	9207-09-11	9207-09-11	9212-01-11
 9403-01-09	9400-11-09	9405-03-09	9405-03-09	9400-11-09	9400-11-09	9405-03-09
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - date '1999-06-07',
@@ -195,7 +234,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - date '1999-06-07',
@@ -205,6 +244,10 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -222,26 +265,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 1999-06-07) (type: interval_day_time), (1999-06-07 - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 2, 3, 4]
+                        selectExpressions: DateColSubtractDateScalar(col 0, val 1999-06-07 00:00:00.0) -> 2:timestamp, DateScalarSubtractDateColumn(val 1999-06-07 00:00:00.0, col 0) -> 3:timestamp, DateColSubtractDateColumn(col 0, col 0) -> 4:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -325,7 +403,7 @@ dateval	_c1	_c2	_c3
 9075-06-13	2584462 00:00:00.000000000	-2584462 00:00:00.000000000	0 00:00:00.000000000
 9209-11-11	2633556 01:00:00.000000000	-2633556 01:00:00.000000000	0 00:00:00.000000000
 9403-01-09	2704106 01:00:00.000000000	-2704106 01:00:00.000000000	0 00:00:00.000000000
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   tsval,
   tsval - interval '2-2' year to month,
@@ -337,7 +415,7 @@ select
 from interval_arithmetic_1
 order by tsval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   tsval,
   tsval - interval '2-2' year to month,
@@ -350,6 +428,10 @@ from interval_arithmetic_1
 order by tsval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -367,26 +449,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: tsval (type: timestamp), (tsval - 2-2) (type: timestamp), (tsval - -2-2) (type: timestamp), (tsval + 2-2) (type: timestamp), (tsval + -2-2) (type: timestamp), (-2-2 + tsval) (type: timestamp), (2-2 + tsval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7]
+                        selectExpressions: TimestampColSubtractIntervalYearMonthScalar(col 1, val 2-2) -> 2:timestamp, TimestampColSubtractIntervalYearMonthScalar(col 1, val -2-2) -> 3:timestamp, TimestampColAddIntervalYearMonthScalar(col 1, val 2-2) -> 4:timestamp, TimestampColAddIntervalYearMonthScalar(col 1, val -2-2) -> 5:timestamp, IntervalYearMonthScalarAddTimestampColumn(val -2-2, col 1) -> 6:timestamp, IntervalYearMonthScalarAddTimestampColumn(val 2-2, col 1) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -476,7 +593,7 @@ tsval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9077-08-13 16:20:09.218517797	9077-08-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9073-04-13 16:20:09.218517797	9077-08-13 16:20:09.218517797
 9209-11-11 04:08:58.223768453	9207-09-11 05:08:58.223768453	9212-01-11 04:08:58.223768453	9212-01-11 04:08:58.223768453	9207-09-11 05:08:58.223768453	9207-09-11 05:08:58.223768453	9212-01-11 04:08:58.223768453
 9403-01-09 18:12:33.547	9400-11-09 18:12:33.547	9405-03-09 18:12:33.547	9405-03-09 18:12:33.547	9400-11-09 18:12:33.547	9400-11-09 18:12:33.547	9405-03-09 18:12:33.547
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
@@ -484,7 +601,7 @@ from interval_arithmetic_1
 order by interval '2-2' year to month + interval '3-3' year to month
 limit 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   interval '2-2' year to month + interval '3-3' year to month,
   interval '2-2' year to month - interval '3-3' year to month
@@ -493,6 +610,10 @@ order by interval '2-2' year to month + interval '3-3' year to month
 limit 2
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -510,27 +631,65 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: []
                     Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: CAST( 5-5 AS INTERVAL YEAR TO MONTH) (type: interval_year_month)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                       TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
                 outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [1, 2]
+                    selectExpressions: ConstantVectorExpression(val 65) -> 1:long, ConstantVectorExpression(val -13) -> 2:long
                 Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 2
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
                   Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
                     Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -564,7 +723,7 @@ POSTHOOK: Input: default@interval_arithmetic_1
 _c0	_c1
 5-5	-1-1
 5-5	-1-1
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - interval '99 11:22:33.123456789' day to second,
@@ -576,7 +735,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   dateval,
   dateval - interval '99 11:22:33.123456789' day to second,
@@ -589,6 +748,10 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -606,26 +769,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), (dateval - 99 11:22:33.123456789) (type: timestamp), (dateval - -99 11:22:33.123456789) (type: timestamp), (dateval + 99 11:22:33.123456789) (type: timestamp), (dateval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + dateval) (type: timestamp), (99 11:22:33.123456789 + dateval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 2, 3, 4, 5, 6, 7]
+                        selectExpressions: DateColSubtractIntervalDayTimeScalar(col 0, val 99 11:22:33.123456789) -> 2:timestamp, DateColSubtractIntervalDayTimeScalar(col 0, val -99 11:22:33.123456789) -> 3:timestamp, DateColAddIntervalDayTimeScalar(col 0, val 99 11:22:33.123456789) -> 4:timestamp, DateColAddIntervalDayTimeScalar(col 0, val -99 11:22:33.123456789) -> 5:timestamp, IntervalDayTimeScalarAddDateColumn(val -99 11:22:33.123456789, col 0) -> 6:timestamp, IntervalDayTimeScalarAddDateColumn(val 99 11:22:33.123456789, col 0) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -715,7 +913,7 @@ dateval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13	9075-03-05 11:37:26.876543211	9075-09-20 11:22:33.123456789	9075-09-20 11:22:33.123456789	9075-03-05 11:37:26.876543211	9075-03-05 11:37:26.876543211	9075-09-20 11:22:33.123456789
 9209-11-11	9209-08-03 13:37:26.876543211	9210-02-18 11:22:33.123456789	9210-02-18 11:22:33.123456789	9209-08-03 13:37:26.876543211	9209-08-03 13:37:26.876543211	9210-02-18 11:22:33.123456789
 9403-01-09	9402-10-01 13:37:26.876543211	9403-04-18 12:22:33.123456789	9403-04-18 12:22:33.123456789	9402-10-01 13:37:26.876543211	9402-10-01 13:37:26.876543211	9403-04-18 12:22:33.123456789
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   dateval,
   tsval,
@@ -725,7 +923,7 @@ select
 from interval_arithmetic_1
 order by dateval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   dateval,
   tsval,
@@ -736,6 +934,10 @@ from interval_arithmetic_1
 order by dateval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -753,26 +955,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: dateval (type: date), tsval (type: timestamp), (dateval - tsval) (type: interval_day_time), (tsval - dateval) (type: interval_day_time), (tsval - tsval) (type: interval_day_time)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2, 3, 4]
+                        selectExpressions: DateColSubtractTimestampColumn(col 0, col 1) -> 2:interval_day_time, TimestampColSubtractDateColumn(col 1, col 0) -> 3:interval_day_time, TimestampColSubtractTimestampColumn(col 1, col 1) -> 4:interval_day_time
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: timestamp), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3, 4]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -858,7 +1095,7 @@ dateval	tsval	_c2	_c3	_c4
 9075-06-13	9075-06-13 16:20:09.218517797	-0 16:20:09.218517797	0 16:20:09.218517797	0 00:00:00.000000000
 9209-11-11	9209-11-11 04:08:58.223768453	-0 04:08:58.223768453	0 04:08:58.223768453	0 00:00:00.000000000
 9403-01-09	9403-01-09 18:12:33.547	-0 18:12:33.547000000	0 18:12:33.547000000	0 00:00:00.000000000
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   tsval,
   tsval - interval '99 11:22:33.123456789' day to second,
@@ -870,7 +1107,7 @@ select
 from interval_arithmetic_1
 order by tsval
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   tsval,
   tsval - interval '99 11:22:33.123456789' day to second,
@@ -883,6 +1120,10 @@ from interval_arithmetic_1
 order by tsval
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -900,26 +1141,61 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: tsval (type: timestamp), (tsval - 99 11:22:33.123456789) (type: timestamp), (tsval - -99 11:22:33.123456789) (type: timestamp), (tsval + 99 11:22:33.123456789) (type: timestamp), (tsval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + tsval) (type: timestamp), (99 11:22:33.123456789 + tsval) (type: timestamp)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [1, 2, 3, 4, 5, 6, 7]
+                        selectExpressions: TimestampColSubtractIntervalDayTimeScalar(col 1, val 99 11:22:33.123456789) -> 2:timestamp, TimestampColSubtractIntervalDayTimeScalar(col 1, val -99 11:22:33.123456789) -> 3:timestamp, TimestampColAddIntervalDayTimeScalar(col 1, val 99 11:22:33.123456789) -> 4:timestamp, TimestampColAddIntervalDayTimeScalar(col 1, val -99 11:22:33.123456789) -> 5:timestamp, IntervalDayTimeScalarAddTimestampColumn(val -99 11:22:33.123456789, col 1) -> 6:timestamp, IntervalDayTimeScalarAddTimestampColumn(val 99 11:22:33.123456789, col 1) -> 7:timestamp
                     Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: timestamp)
                       sort order: +
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkOperator
+                          native: false
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsNotMet: Uniform Hash IS false
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                groupByVectorOutput: true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1009,14 +1285,14 @@ tsval	_c1	_c2	_c3	_c4	_c5	_c6
 9075-06-13 16:20:09.218517797	9075-03-06 03:57:36.095061008	9075-09-21 03:42:42.341974586	9075-09-21 03:42:42.341974586	9075-03-06 03:57:36.095061008	9075-03-06 03:57:36.095061008	9075-09-21 03:42:42.341974586
 9209-11-11 04:08:58.223768453	9209-08-03 17:46:25.100311664	9210-02-18 15:31:31.347225242	9210-02-18 15:31:31.347225242	9209-08-03 17:46:25.100311664	9209-08-03 17:46:25.100311664	9210-02-18 15:31:31.347225242
 9403-01-09 18:12:33.547	9402-10-02 07:50:00.423543211	9403-04-19 06:35:06.670456789	9403-04-19 06:35:06.670456789	9402-10-02 07:50:00.423543211	9402-10-02 07:50:00.423543211	9403-04-19 06:35:06.670456789
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
 from interval_arithmetic_1
 limit 2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
   interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second,
   interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second
@@ -1024,6 +1300,10 @@ from interval_arithmetic_1
 limit 2
 POSTHOOK: type: QUERY
 Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1038,15 +1318,29 @@ STAGE PLANS:
                 TableScan
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Select Operator
                     expressions: 109 20:30:40.246913578 (type: interval_day_time), 89 02:14:26.000000000 (type: interval_day_time)
                     outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [2, 3]
+                        selectExpressions: ConstantVectorExpression(val 109 20:30:40.246913578) -> 2:interval_day_time, ConstantVectorExpression(val 89 02:14:26.000000000) -> 3:interval_day_time
                     Statistics: Num rows: 50 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 2
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1054,6 +1348,14 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/fcb57100/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
index 0bc0e4c..53c728b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
@@ -136,7 +136,7 @@ POSTHOOK: Lineage: vectortab_b_1korc.si SIMPLE [(vectortab_b_1k)vectortab_b_1k.F
 POSTHOOK: Lineage: vectortab_b_1korc.t SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab_b_1korc.ts SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab_b_1korc.ts2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization expression
 select
    v1.s,
    v2.s,
@@ -158,7 +158,7 @@ join
       on v1.intrvl1 = v2.intrvl2 
       and v1.s = v2.s
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization expression
 select
    v1.s,
    v2.s,
@@ -180,6 +180,10 @@ join
       on v1.intrvl1 = v2.intrvl2 
       and v1.s = v2.s
 POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -197,12 +201,24 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab_a_1korc
                   Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 8) -> boolean, SelectColumnIsNotNull(col 14)(children: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp) -> boolean) -> boolean
                     predicate: (s is not null and (dt - CAST( ts AS DATE)) is not null) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: s (type: string), (dt - CAST( ts AS DATE)) (type: interval_day_time)
                       outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [8, 14]
+                          selectExpressions: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp
                       Statistics: Num rows: 1000 Data size: 460264 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
@@ -210,6 +226,10 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: string), _col1 (type: interval_day_time)
                           1 _col0 (type: string), _col1 (type: interval_day_time)
+                        Map Join Vectorization:
+                            className: VectorMapJoinInnerBigOnlyMultiKeyOperator
+                            native: true
+                            nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
                         outputColumnNames: _col0, _col1, _col2
                         input vertices:
                           1 Map 2
@@ -217,9 +237,16 @@ STAGE PLANS:
                         Select Operator
                           expressions: _col0 (type: string), _col2 (type: string), _col1 (type: interval_day_time)
                           outputColumnNames: _col0, _col1, _col2
+                          Select Vectorization:
+                              className: VectorSelectOperator
+                              native: true
+                              projectedOutputColumns: [8, 8, 14]
                           Statistics: Num rows: 1100 Data size: 506290 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
+                            File Sink Vectorization:
+                                className: VectorFileSinkOperator
+                                native: false
                             Statistics: Num rows: 1100 Data size: 506290 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -227,25 +254,57 @@ STAGE PLANS:
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Map 2 
             Map Operator Tree:
                 TableScan
                   alias: vectortab_b_1korc
                   Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 8) -> boolean, SelectColumnIsNotNull(col 14)(children: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp) -> boolean) -> boolean
                     predicate: (s is not null and (dt - CAST( ts AS DATE)) is not null) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: s (type: string), (dt - CAST( ts AS DATE)) (type: interval_day_time)
                       outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [8, 14]
+                          selectExpressions: DateColSubtractDateColumn(col 12, col 13)(children: CastTimestampToDate(col 10) -> 13:date) -> 14:timestamp
                       Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: interval_day_time)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: interval_day_time)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkMultiKeyOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No TopN IS true, Uniform Hash IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/fcb57100/ql/src/test/results/clientpositive/llap/vector_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join.q.out b/ql/src/test/results/clientpositive/llap/vector_join.q.out
new file mode 100644
index 0000000..94c0290
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_join.q.out
@@ -0,0 +1,104 @@
+PREHOOK: query: DROP TABLE IF EXISTS test1_vc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS test1_vc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS test2_vc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS test2_vc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE test1_vc
+ (
+   id string)
+   PARTITIONED BY (
+  cr_year bigint,
+  cr_month bigint)
+ ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
+TBLPROPERTIES (
+  'serialization.null.format'='' )
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test1_vc
+POSTHOOK: query: CREATE TABLE test1_vc
+ (
+   id string)
+   PARTITIONED BY (
+  cr_year bigint,
+  cr_month bigint)
+ ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
+TBLPROPERTIES (
+  'serialization.null.format'='' )
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test1_vc
+PREHOOK: query: CREATE TABLE test2_vc(
+    id string
+  )
+   PARTITIONED BY (
+  cr_year bigint,
+  cr_month bigint)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
+TBLPROPERTIES (
+  'serialization.null.format'=''
+ )
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test2_vc
+POSTHOOK: query: CREATE TABLE test2_vc(
+    id string
+  )
+   PARTITIONED BY (
+  cr_year bigint,
+  cr_month bigint)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
+TBLPROPERTIES (
+  'serialization.null.format'=''
+ )
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test2_vc
+PREHOOK: query: SELECT cr.id1 ,
+cr.id2
+FROM
+(SELECT t1.id id1,
+ t2.id id2
+ from
+ (select * from test1_vc ) t1
+ left outer join test2_vc  t2
+ on t1.id=t2.id) cr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test1_vc
+PREHOOK: Input: default@test2_vc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cr.id1 ,
+cr.id2
+FROM
+(SELECT t1.id id1,
+ t2.id id2
+ from
+ (select * from test1_vc ) t1
+ left outer join test2_vc  t2
+ on t1.id=t2.id) cr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test1_vc
+POSTHOOK: Input: default@test2_vc
+#### A masked pattern was here ####