Posted to commits@hive.apache.org by kg...@apache.org on 2018/02/27 07:01:35 UTC

[5/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
deleted file mode 100644
index c8959ef..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
+++ /dev/null
@@ -1,943 +0,0 @@
-WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 183488 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: ((UDFToDouble(cbigint) < cdouble) and (cint > 0)) (type: boolean)
-                    Statistics: Num rows: 1365 Data size: 20400 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cbigint (type: bigint), cdouble (type: double)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE
-                      Limit
-                        Number of rows: 7
-                        Statistics: Num rows: 7 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 7 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 7
-      Processor Tree:
-        ListSink
-
-WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--1887561756	-10011.0
--1887561756	-13877.0
--1887561756	-2281.0
--1887561756	-8881.0
--1887561756	10361.0
--1887561756	1839.0
--1887561756	9531.0
-PREHOOK: query: explain vectorization detail
-select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 146796 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:tinyint)
-                    predicate: ctinyint is not null (type: boolean)
-                    Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: ctinyint (type: tinyint), cdouble (type: double), csmallint (type: smallint)
-                      outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 5, 1]
-                      Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint), _col1 (type: double)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 5]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col2 (type: smallint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:tinyint, KEY.reducesinkkey1:double, VALUE._col0:smallint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: smallint)
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--64	-10462.0	-10462
--64	-15920.0	-15920
--64	-1600.0	-1600
--64	-200.0	-200
--64	-2919.0	-2919
--64	-3097.0	-3097
--64	-3586.0	-3586
--64	-4018.0	-4018
--64	-4040.0	-4040
--64	-4803.0	-4803
--64	-6907.0	-6907
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-8080.0	-8080
--64	-9842.0	-9842
-PREHOOK: query: explain vectorization detail
-select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double)
-                    outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 13]
-                        selectExpressions: DoubleColAddDoubleScalar(col 5:double, val 1.0) -> 13:double
-                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: avg(_col1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgDouble(col 13:double) -> struct<count:bigint,sum:double,input:double>
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
-                      keys: _col0 (type: tinyint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 128 Data size: 10628 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            partitionColumnNums: [0]
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 128 Data size: 10628 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:tinyint, VALUE._col0:struct<count:bigint,sum:double,input:double>
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFAvgFinal(col 1:struct<count:bigint,sum:double,input:double>) -> double
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: [0]
-                keys: KEY._col0 (type: tinyint)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 128 Data size: 1412 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46	3033.55
--47	-574.6428571428571
--48	1672.909090909091
--49	768.7659574468086
--50	-960.0192307692307
--51	-96.46341463414635
--52	2810.705882352941
--53	-532.7567567567568
--54	2712.7272727272725
--55	2385.595744680851
--56	2595.818181818182
--57	1867.0535714285713
--58	3483.2444444444445
--59	318.27272727272725
--60	1071.82
--61	914.3404255319149
--62	245.69387755102042
--63	2178.7272727272725
--64	373.52941176470586
-NULL	9370.0945309795
-PREHOOK: query: explain vectorization detail
-select distinct(ctinyint) from alltypesorc limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select distinct(ctinyint) from alltypesorc limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint)
-                    outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0]
-                    Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: ctinyint (type: tinyint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
-                        Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: KEY._col0:tinyint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: tinyint)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46
--47
--48
--49
--50
--51
--52
--53
--54
--55
--56
--57
--58
--59
--60
--61
--62
--63
--64
-NULL
-PREHOOK: query: explain vectorization detail
-select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: ctinyint, cdouble
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 5]
-                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint, col 5:double
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: ctinyint (type: tinyint), cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint), _col1 (type: double)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 1]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            partitionColumnNums: [0]
-                            valueColumnNums: []
-                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:tinyint, KEY._col1:double
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint, col 1:double
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col1)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 1:double) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: COMPLETE
-                      keyExpressions: col 0:tinyint
-                      native: false
-                      vectorProcessingMode: STREAMING
-                      projectedOutputColumnNums: [0]
-                  keys: _col0 (type: tinyint)
-                  mode: complete
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 128 Data size: 1412 Basic stats: COMPLETE Column stats: COMPLETE
-                  Limit
-                    Number of rows: 20
-                    Limit Vectorization:
-                        className: VectorLimitOperator
-                        native: true
-                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46	24
--47	22
--48	29
--49	26
--50	30
--51	21
--52	33
--53	22
--54	26
--55	29
--56	36
--57	35
--58	23
--59	31
--60	27
--61	25
--62	27
--63	19
--64	24
-NULL	2932
-PREHOOK: query: explain vectorization detail
-select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 0
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail
-select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:tinyint)
-                    predicate: ctinyint is not null (type: boolean)
-                    Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(ctinyint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 0:tinyint) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 5:double
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
-                      keys: cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:double, VALUE._col0:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:double
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: [0]
-                keys: KEY._col0 (type: double)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: bigint), _col0 (type: double)
-                  sort order: ++
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkObjectHashOperator
-                      keyColumnNums: [1, 0]
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      valueColumnNums: []
-                  Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                  TopN Hash Memory Usage: 0.3
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY.reducesinkkey0:bigint, KEY.reducesinkkey1:double
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey1 (type: double), KEY.reducesinkkey0 (type: bigint)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [1, 0]
-                Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--10462.0	-64
--1121.0	-89
--11322.0	-101
--11492.0	-78
--15920.0	-64
--4803.0	-64
--6907.0	-64
--7196.0	-2009
--8080.0	-64
--8118.0	-80
--9842.0	-64
-10496.0	-67
-15601.0	-1733
-3520.0	-86
-4811.0	-115
-5241.0	-80
-557.0	-75
-7705.0	-88
-9452.0	-76
-NULL	-32768

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out b/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
deleted file mode 100644
index 2b5a21e..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
+++ /dev/null
@@ -1,684 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS parquet_types_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_types_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_types_staging (
-  cint int,
-  ctinyint tinyint,
-  csmallint smallint,
-  cfloat float,
-  cdouble double,
-  cstring1 string,
-  t timestamp,
-  cchar char(5),
-  cvarchar varchar(10),
-  cbinary string,
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>,
-  d date
-) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-COLLECTION ITEMS TERMINATED BY ','
-MAP KEYS TERMINATED BY ':'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: CREATE TABLE parquet_types_staging (
-  cint int,
-  ctinyint tinyint,
-  csmallint smallint,
-  cfloat float,
-  cdouble double,
-  cstring1 string,
-  t timestamp,
-  cchar char(5),
-  cvarchar varchar(10),
-  cbinary string,
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>,
-  d date
-) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-COLLECTION ITEMS TERMINATED BY ','
-MAP KEYS TERMINATED BY ':'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: DROP TABLE IF EXISTS parquet_project_test
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_project_test
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_project_test(
-cint int,
-m1 map<string, string>
-) STORED AS PARQUET
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: CREATE TABLE parquet_project_test(
-cint int,
-m1 map<string, string>
-) STORED AS PARQUET
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_project_test
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","red") from parquet_types_staging
-where ctinyint = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","red") from parquet_types_staging
-where ctinyint = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","green") from parquet_types_staging
-where ctinyint = 2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","green") from parquet_types_staging
-where ctinyint = 2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","blue") from parquet_types_staging
-where ctinyint = 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","blue") from parquet_types_staging
-where ctinyint = 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: explain vectorization select * from parquet_project_test
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select * from parquet_project_test
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: cint (type: int), m1 (type: map<string,string>)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Vectorizing complex type MAP not enabled (map<string,string>) since hive.vectorized.complex.types.enabled IS false
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from parquet_project_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from parquet_project_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-PREHOOK: query: explain vectorization select count(*) from parquet_project_test
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_project_test
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 22 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_project_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_project_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-22
-PREHOOK: query: explain vectorization select cint, count(*) from parquet_project_test
-group by cint
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select cint, count(*) from parquet_project_test
-group by cint
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: cint (type: int)
-                    outputColumnNames: cint
-                    Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: cint (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cint, count(*) from parquet_project_test
-group by cint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select cint, count(*) from parquet_project_test
-group by cint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-1	8
-2	7
-3	7
-PREHOOK: query: explain vectorization select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: m1['color'] (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Vectorizing complex type MAP not enabled (map<string,string>) since hive.vectorized.complex.types.enabled IS false
-                vectorized: false
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 11 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 11 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-blue	7
-green	7
-red	8
-PREHOOK: query: create table if not exists parquet_nullsplit(key string, val string) partitioned by (len string)
-stored as parquet
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_nullsplit
-POSTHOOK: query: create table if not exists parquet_nullsplit(key string, val string) partitioned by (len string)
-stored as parquet
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_nullsplit
-PREHOOK: query: insert into table parquet_nullsplit partition(len='1')
-values ('one', 'red')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@parquet_nullsplit@len=1
-POSTHOOK: query: insert into table parquet_nullsplit partition(len='1')
-values ('one', 'red')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@parquet_nullsplit@len=1
-POSTHOOK: Lineage: parquet_nullsplit PARTITION(len=1).key SCRIPT []
-POSTHOOK: Lineage: parquet_nullsplit PARTITION(len=1).val SCRIPT []
-PREHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '1'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '1'
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_nullsplit
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_nullsplit where len = '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_nullsplit
-PREHOOK: Input: default@parquet_nullsplit@len=1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_nullsplit where len = '1'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_nullsplit
-POSTHOOK: Input: default@parquet_nullsplit@len=1
-#### A masked pattern was here ####
-1
-PREHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '99'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '99'
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_nullsplit
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (len = '99') (type: boolean)
-                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: unknown
-            Map Vectorization:
-                enabled: true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_nullsplit where len = '99'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_nullsplit
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_nullsplit where len = '99'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_nullsplit
-#### A masked pattern was here ####
-0
-PREHOOK: query: drop table parquet_nullsplit
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_nullsplit
-PREHOOK: Output: default@parquet_nullsplit
-POSTHOOK: query: drop table parquet_nullsplit
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_nullsplit
-POSTHOOK: Output: default@parquet_nullsplit
-PREHOOK: query: drop table parquet_project_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_project_test
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: drop table parquet_project_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_project_test
-POSTHOOK: Output: default@parquet_project_test
-PREHOOK: query: drop table parquet_types_staging
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: drop table parquet_types_staging
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_types_staging

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
deleted file mode 100644
index 673e607..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
+++ /dev/null
@@ -1,214 +0,0 @@
-PREHOOK: query: create temporary table x (a int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@x
-POSTHOOK: query: create temporary table x (a int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@x
-PREHOOK: query: create temporary table y (b int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@y
-POSTHOOK: query: create temporary table y (b int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@y
-PREHOOK: query: insert into x values(1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@x
-POSTHOOK: query: insert into x values(1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@x
-POSTHOOK: Lineage: x.a SCRIPT []
-PREHOOK: query: insert into y values(1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@y
-POSTHOOK: query: insert into y values(1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@y
-POSTHOOK: Lineage: y.b SCRIPT []
-PREHOOK: query: explain vectorization expression
-select count(1) from x, y where a = b
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count(1) from x, y where a = b
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Map 1 <- Map 3 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: x
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:int)
-                    predicate: a is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: a (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0]
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      Map Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerBigOnlyLongOperator
-                            native: true
-                            nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Fast Hash Table and No Hybrid Hash Join IS true
-                        input vertices:
-                          1 Map 3
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Group By Operator
-                          aggregations: count()
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint
-                              className: VectorGroupByOperator
-                              groupByMode: HASH
-                              native: false
-                              vectorProcessingMode: HASH
-                              projectedOutputColumnNums: [0]
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          Reduce Output Operator
-                            sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkEmptyKeyOperator
-                                native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: y
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:int)
-                    predicate: b is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: b (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0]
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(1) from x, y where a = b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@x
-PREHOOK: Input: default@y
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from x, y where a = b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@x
-POSTHOOK: Input: default@y
-#### A masked pattern was here ####
-1