Posted to commits@hive.apache.org by kg...@apache.org on 2018/02/27 07:01:31 UTC

[1/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 987d5fff2 -> ea53203f6


http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/vector_udf_adaptor_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf_adaptor_1.q.out b/ql/src/test/results/clientpositive/vector_udf_adaptor_1.q.out
deleted file mode 100644
index 6efcd8c..0000000
--- a/ql/src/test/results/clientpositive/vector_udf_adaptor_1.q.out
+++ /dev/null
@@ -1,192 +0,0 @@
-PREHOOK: query: create table student_2_lines(
-name string,
-age int,
-gpa double)
-row format delimited
-fields terminated by '\001'
-stored as textfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@student_2_lines
-POSTHOOK: query: create table student_2_lines(
-name string,
-age int,
-gpa double)
-row format delimited
-fields terminated by '\001'
-stored as textfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@student_2_lines
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/student_2_lines' OVERWRITE INTO TABLE student_2_lines
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@student_2_lines
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/student_2_lines' OVERWRITE INTO TABLE student_2_lines
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@student_2_lines
-PREHOOK: query: analyze table student_2_lines compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@student_2_lines
-PREHOOK: Output: default@student_2_lines
-POSTHOOK: query: analyze table student_2_lines compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@student_2_lines
-POSTHOOK: Output: default@student_2_lines
-PREHOOK: query: create table insert_10_1 (a float, b int, c timestamp, d binary)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@insert_10_1
-POSTHOOK: query: create table insert_10_1 (a float, b int, c timestamp, d binary)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@insert_10_1
-PREHOOK: query: explain vectorization detail
-insert overwrite table insert_10_1
-    select cast(gpa as float),
-    age,
-    IF(age>40,cast('2011-01-01 01:01:01' as timestamp),NULL),
-    IF(LENGTH(name)>10,cast(name as binary),NULL) from student_2_lines
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-insert overwrite table insert_10_1
-    select cast(gpa as float),
-    age,
-    IF(age>40,cast('2011-01-01 01:01:01' as timestamp),NULL),
-    IF(LENGTH(name)>10,cast(name as binary),NULL) from student_2_lines
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: student_2_lines
-            Statistics: Num rows: 2 Data size: 37 Basic stats: COMPLETE Column stats: NONE
-            TableScan Vectorization:
-                native: true
-                vectorizationSchemaColumns: [0:name:string, 1:age:int, 2:gpa:double, 3:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-            Select Operator
-              expressions: UDFToFloat(gpa) (type: float), age (type: int), if((age > 40), 2011-01-01 01:01:01.0, null) (type: timestamp), if((length(name) > 10), CAST( name AS BINARY), null) (type: binary)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Select Vectorization:
-                  className: VectorSelectOperator
-                  native: true
-                  projectedOutputColumnNums: [2, 1, 5, 8]
-                  selectExpressions: VectorUDFAdaptor(if((age > 40), 2011-01-01 01:01:01.0, null))(children: LongColGreaterLongScalar(col 1:int, val 40) -> 4:boolean) -> 5:timestamp, VectorUDFAdaptor(if((length(name) > 10), CAST( name AS BINARY), null))(children: LongColGreaterLongScalar(col 4:int, val 10)(children: StringLength(col 0:string) -> 4:int) -> 6:boolean, VectorUDFAdaptor(CAST( name AS BINARY)) -> 7:binary) -> 8:binary
-              Statistics: Num rows: 2 Data size: 37 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                File Sink Vectorization:
-                    className: VectorFileSinkOperator
-                    native: false
-                Statistics: Num rows: 2 Data size: 37 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.insert_10_1
-      Execution mode: vectorized
-      Map Vectorization:
-          enabled: true
-          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-          inputFormatFeatureSupport: [DECIMAL_64]
-          featureSupportInUse: [DECIMAL_64]
-          inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-          allNative: false
-          usesVectorUDFAdaptor: true
-          vectorized: true
-          rowBatchContext:
-              dataColumnCount: 3
-              includeColumns: [0, 1, 2]
-              dataColumns: name:string, age:int, gpa:double
-              partitionColumnCount: 0
-              scratchColumnTypeNames: [bigint, timestamp, bigint, string, string]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.insert_10_1
-
-  Stage: Stage-2
-    Stats Work
-      Basic Stats Work:
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.insert_10_1
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.insert_10_1
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table insert_10_1
-    select cast(gpa as float),
-    age,
-    IF(age>40,cast('2011-01-01 01:01:01' as timestamp),NULL),
-    IF(LENGTH(name)>10,cast(name as binary),NULL) from student_2_lines
-PREHOOK: type: QUERY
-PREHOOK: Input: default@student_2_lines
-PREHOOK: Output: default@insert_10_1
-POSTHOOK: query: insert overwrite table insert_10_1
-    select cast(gpa as float),
-    age,
-    IF(age>40,cast('2011-01-01 01:01:01' as timestamp),NULL),
-    IF(LENGTH(name)>10,cast(name as binary),NULL) from student_2_lines
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@student_2_lines
-POSTHOOK: Output: default@insert_10_1
-POSTHOOK: Lineage: insert_10_1.a EXPRESSION [(student_2_lines)student_2_lines.FieldSchema(name:gpa, type:double, comment:null), ]
-POSTHOOK: Lineage: insert_10_1.b SIMPLE [(student_2_lines)student_2_lines.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: insert_10_1.c EXPRESSION [(student_2_lines)student_2_lines.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: insert_10_1.d EXPRESSION [(student_2_lines)student_2_lines.FieldSchema(name:name, type:string, comment:null), ]
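
For reference, the scenario this golden file covered can be rebuilt directly from the deleted output. A minimal sketch, using the DDL and query verbatim from above (run with hive.vectorized.execution.enabled=true; per the deleted plan, the IF() expressions with timestamp/binary cast branches are handled through VectorUDFAdaptor, and the Map Vectorization summary reports usesVectorUDFAdaptor: true):

    create table student_2_lines(
      name string,
      age int,
      gpa double)
    row format delimited
    fields terminated by '\001'
    stored as textfile;

    create table insert_10_1 (a float, b int, c timestamp, d binary);

    -- The deleted plan shows these IF() expressions wrapped in
    -- VectorUDFAdaptor rather than a native vectorized expression:
    explain vectorization detail
    insert overwrite table insert_10_1
        select cast(gpa as float),
        age,
        IF(age>40,cast('2011-01-01 01:01:01' as timestamp),NULL),
        IF(LENGTH(name)>10,cast(name as binary),NULL) from student_2_lines;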

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/vector_udf_string_to_boolean.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf_string_to_boolean.q.out b/ql/src/test/results/clientpositive/vector_udf_string_to_boolean.q.out
deleted file mode 100644
index 647fcb7..0000000
--- a/ql/src/test/results/clientpositive/vector_udf_string_to_boolean.q.out
+++ /dev/null
@@ -1,189 +0,0 @@
-PREHOOK: query: create table t (s string) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t
-POSTHOOK: query: create table t (s string) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t
-PREHOOK: query: insert into t values ('false')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('false')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('FALSE')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('FALSE')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('FaLsE')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('FaLsE')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('true')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('true')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('TRUE')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('TRUE')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('TrUe')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('TrUe')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('Other')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('Other')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('Off')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('Off')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('No')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('No')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('0')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('0')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: insert into t values ('1')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('1')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: explain select s,cast(s as boolean) from t order by s
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select s,cast(s as boolean) from t order by s
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: t
-                  Statistics: Num rows: 12 Data size: 1068 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: s (type: string), UDFToBoolean(s) (type: boolean)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Statistics: Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
-                      value expressions: _col1 (type: boolean)
-            Execution mode: llap
-            LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 1116 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select s,cast(s as boolean) from t order by s
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t
-#### A masked pattern was here ####
-POSTHOOK: query: select s,cast(s as boolean) from t order by s
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
-#### A masked pattern was here ####
-	false
-0	false
-1	true
-FALSE	false
-FaLsE	false
-No	false
-Off	false
-Other	true
-TRUE	true
-TrUe	true
-false	false
-true	true
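
The second deleted file documented Hive's string-to-boolean cast semantics. A minimal sketch of the same check, condensed from the deleted statements above (the twelve single-row inserts are collapsed here into one multi-row INSERT ... VALUES, which Hive supports):

    create table t (s string) stored as orc;

    insert into t values
      ('false'), ('FALSE'), ('FaLsE'),
      ('true'),  ('TRUE'),  ('TrUe'),
      (''), ('Other'), ('Off'), ('No'), ('0'), ('1');

    -- Per the deleted golden output: '', '0', and the case-insensitive
    -- strings 'false', 'no', 'off' cast to false; any other non-empty
    -- string ('1', 'true', 'Other') casts to true.
    select s, cast(s as boolean) from t order by s;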

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_div0.q.out b/ql/src/test/results/clientpositive/vectorization_div0.q.out
deleted file mode 100644
index 2c55516..0000000
--- a/ql/src/test/results/clientpositive/vectorization_div0.q.out
+++ /dev/null
@@ -1,831 +0,0 @@
-PREHOOK: query: explain vectorization expression
-select cint / 0, ctinyint / 0, cbigint / 0, cdouble / 0.0 from alltypesorc limit 100
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select cint / 0, ctinyint / 0, cbigint / 0, cdouble / 0.0 from alltypesorc limit 100
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 220184 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Select Operator
-                    expressions: (UDFToDouble(cint) / 0.0) (type: double), (UDFToDouble(ctinyint) / 0.0) (type: double), (UDFToDouble(cbigint) / 0.0) (type: double), (cdouble / 0.0) (type: double)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [14, 15, 16, 13]
-                        selectExpressions: DoubleColDivideDoubleScalar(col 13:double, val 0.0)(children: CastLongToDouble(col 2:int) -> 13:double) -> 14:double, DoubleColDivideDoubleScalar(col 13:double, val 0.0)(children: CastLongToDouble(col 0:tinyint) -> 13:double) -> 15:double, DoubleColDivideDoubleScalar(col 13:double, val 0.0)(children: CastLongToDouble(col 3:bigint) -> 13:double) -> 16:double, DoubleColDivideDoubleScalar(col 5:double, val 0.0) -> 13:double
-                    Statistics: Num rows: 12288 Data size: 393216 Basic stats: COMPLETE Column stats: COMPLETE
-                    Limit
-                      Number of rows: 100
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 100 Data size: 3200 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 100 Data size: 3200 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 100
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cint / 0, ctinyint / 0, cbigint / 0, cdouble / 0.0 from alltypesorc limit 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select cint / 0, ctinyint / 0, cbigint / 0, cdouble / 0.0 from alltypesorc limit 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression
-select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
-from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
-from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 146792 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val 0), FilterLongColLessLongScalar(col 3:bigint, val 100000000))
-                    predicate: ((cbigint < 100000000) and (cbigint > 0)) (type: boolean)
-                    Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: (cbigint - 988888) (type: bigint), (cdouble / UDFToDouble((cbigint - 988888))) (type: double), (1.2 / CAST( (cbigint - 988888) AS decimal(19,0))) (type: decimal(22,21))
-                      outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [13, 16, 18]
-                          selectExpressions: LongColSubtractLongScalar(col 3:bigint, val 988888) -> 13:bigint, DoubleColDivideDoubleColumn(col 5:double, col 15:double)(children: CastLongToDouble(col 14:bigint)(children: LongColSubtractLongScalar(col 3:bigint, val 988888) -> 14:bigint) -> 15:double) -> 16:double, DecimalScalarDivideDecimalColumn(val 1.2, col 17:decimal(19,0))(children: CastLongToDecimal(col 14:bigint)(children: LongColSubtractLongScalar(col 3:bigint, val 988888) -> 14:bigint) -> 17:decimal(19,0)) -> 18:decimal(22,21)
-                      Statistics: Num rows: 1365 Data size: 174720 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: bigint), _col1 (type: double)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1365 Data size: 174720 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col2 (type: decimal(22,21))
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: bigint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: decimal(22,21))
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 1365 Data size: 174720 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 100
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 100 Data size: 12800 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 100 Data size: 12800 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 100
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
-from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) 
-from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--63925	0.11256941728588189	-0.000018771998435666797
--985319	2.0297994862577501E-4	-0.000001217879691754650
--985319	NULL	-0.000001217879691754650
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-0	NULL	NULL
-10000738	0.001559984873116364	0.000000119991144653525
-10081828	0.0015474376273826532	0.000000119026033770860
-10745355	-6.696847149303117E-4	0.000000111676161466978
-11127199	-1.797397530142132E-5	0.000000107843851808528
-11722580	NULL	0.000000102366543883684
-12649396	NULL	0.000000094866189658384
-13126214	-1.5236685917203544E-5	0.000000091420115503221
-14042667	NULL	0.000000085453852889910
-14943972	-1.3383322720358416E-5	0.000000080299936322150
-16259022	NULL	0.000000073805177211766
-16531556	-1.2098074736582569E-5	0.000000072588448419495
-16596157	NULL	0.000000072305895876979
-17058489	-1.1724367849930905E-5	0.000000070346207099585
-17247320	-4.172242412154468E-4	0.000000069576026884177
-19004427	8.209139901981786E-4	0.000000063143182375349
-19498517	NULL	0.000000061543141973310
-20165679	7.736411950224934E-4	0.000000059507046601307
-20547875	NULL	0.000000058400199534015
-23264783	NULL	0.000000051580107151655
-2331159	NULL	0.000000514765402102559
-2342037	NULL	0.000000512374484263058
-23475527	6.645644206411213E-4	0.000000051117063314489
-24379905	NULL	0.000000049220864478348
-24514624	-2.935390728407664E-4	0.000000048950373458716
-25154198	-2.860755091456305E-4	0.000000047705754721339
-25245192	-7.922300610745999E-6	0.000000047533803664476
-26610943	NULL	0.000000045094230595286
-27520143	5.668938566198584E-4	0.000000043604424584567
-27818379	NULL	0.000000043136949137115
-28400244	NULL	0.000000042253158106670
-28698999	5.43607810153936E-4	0.000000041813305056389
-28806400	-6.9429015774272385E-6	0.000000041657409464563
-29920877	5.214085135271938E-4	0.000000040105776311303
-33126539	NULL	0.000000036224732079617
-34603086	NULL	0.000000034678987879867
-35156265	NULL	0.000000034133318769784
-3533105	-5.660743170667161E-5	0.000000339644590240030
-35862260	NULL	0.000000033461360215447
-36123797	-1.992038655294182E-4	0.000000033219099310075
-36341671	-1.980096072082101E-4	0.000000033019945615599
-36413215	-5.4925114412446145E-6	0.000000032955068647468
-36578596	4.2650625518814335E-4	0.000000032806070522772
-36796441	-1.955623914823719E-4	0.000000032611849607955
-3768727	0.004139594085748318	0.000000318409903397089
-392309	NULL	0.000003058813333367320
-39723587	NULL	0.000000030208752296211
-39985709	-1.7996429674411925E-4	0.000000030010722080731
-40018606	NULL	0.000000029986051987918
-41003161	NULL	0.000000029266036342905
-41158231	3.790493328053871E-4	0.000000029155772025285
-41848817	NULL	0.000000028674645689507
-44047567	-1.633688416888043E-4	0.000000027243275434487
-45125678	NULL	0.000000026592398234992
-45180154	NULL	0.000000026560334433566
-45717793	3.4124569399052136E-4	0.000000026247986205283
-46163162	NULL	0.000000025994753132379
-46525838	3.353190543284787E-4	0.000000025792120068853
-4728619	NULL	0.000000253773881972728
-48626663	NULL	0.000000024677819244969
-49102701	-1.465499830650864E-4	0.000000024438574163161
-50300445	-1.4306036457530346E-4	0.000000023856647789100
-50929325	-1.412938420055636E-4	0.000000023562063702984
-52422534	-1.3726921327381848E-4	0.000000022890919389742
-52667422	2.9621727070673783E-4	0.000000022784483356713
-52962061	2.945693522010029E-4	0.000000022657728520044
-53695172	NULL	0.000000022348377988248
-5391403	NULL	0.000000222576572369010
-54760317	NULL	0.000000021913678841560
-55020655	2.835480602693661E-4	0.000000021809991175132
-56102034	NULL	0.000000021389598815615
-56131313	NULL	0.000000021378441655195
-56838351	-3.5187509222426247E-6	0.000000021112505533456
-56997841	-3.5089048372902406E-6	0.000000021053429023741
-57778807	-1.2454393528755274E-4	0.000000020768860803928
-58080381	NULL	0.000000020661021490200
-58307527	NULL	0.000000020580533281749
-58536385	-1.2293208745295768E-4	0.000000020500070170032
-59347745	NULL	0.000000020219807846111
-60229567	NULL	0.000000019923769334088
-60330397	NULL	0.000000019890470801974
-673083	-0.010691103474608629	0.000001782841046349410
-7022666	-0.0010246820794268159	0.000000170875277280736
-7470430	NULL	0.000000160633323650714
-8276429	NULL	0.000000144990067576246
-8286860	-8.683626850218298E-4	0.000000144807562816314
-8299981	-8.669899364829872E-4	0.000000144578644216174
-9247593	NULL	0.000000129763496295739
-9821695	-7.326637611939691E-4	0.000000122178503812224
-PREHOOK: query: explain vectorization expression
-select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
-from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
-from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 146792 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -500.0), FilterDoubleColLessDoubleScalar(col 5:double, val -199.0))
-                    predicate: ((cdouble < -199.0) and (cdouble >= -500.0)) (type: boolean)
-                    Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: (cdouble + 200.0) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3.0 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col4, _col5
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [13, 16, 17, 15, 18]
-                          selectExpressions: DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 13:double, DoubleColDivideDoubleColumn(col 14:double, col 15:double)(children: CastLongToDouble(col 3:bigint) -> 14:double, DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 15:double) -> 16:double, DoubleColDivideDoubleColumn(col 14:double, col 15:double)(children: DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 14:double, DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 15:double) -> 17:double, DoubleScalarDivideDoubleColumn(val 3.0, col 14:double)(children: DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 14:double) -> 15:double, DoubleScalarDivideDoubleColumn(val 1.2, col 14:double)(children: DoubleColAddDoubleScalar(col 5:double, val 200.0) -> 14:double) -> 18:double
-                      Statistics: Num rows: 1365 Data size: 65520 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double), _col1 (type: double)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1365 Data size: 65520 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: double), KEY.reducesinkkey1 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2, 1, 3, 4]
-                Statistics: Num rows: 1365 Data size: 65520 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 100
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 100 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 100 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 100
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
-from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) 
-from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--113.0	-1.6495816690265486E7	1.0	-1.6495816690265486E7	-0.02654867256637168	-0.010619469026548672
--113.0	NULL	1.0	NULL	-0.02654867256637168	-0.010619469026548672
--116.0	NULL	1.0	NULL	-0.02586206896551724	-0.010344827586206896
--12.0	-1.5533560716666666E8	1.0	-1.5533560716666666E8	-0.25	-0.09999999999999999
--126.0	-1.4793867349206349E7	1.0	-1.4793867349206349E7	-0.023809523809523808	-0.009523809523809523
--126.0	NULL	1.0	NULL	-0.023809523809523808	-0.009523809523809523
--128.0	NULL	1.0	NULL	-0.0234375	-0.009375
--129.0	1.2758548906976745E7	1.0	1.2758548906976745E7	-0.023255813953488372	-0.009302325581395349
--132.0	NULL	1.0	NULL	-0.022727272727272728	-0.00909090909090909
--137.0	NULL	1.0	NULL	-0.021897810218978103	-0.00875912408759124
--138.0	NULL	1.0	NULL	-0.021739130434782608	-0.008695652173913044
--140.0	NULL	1.0	NULL	-0.02142857142857143	-0.008571428571428572
--148.0	NULL	1.0	NULL	-0.02027027027027027	-0.008108108108108109
--152.0	NULL	1.0	NULL	-0.019736842105263157	-0.007894736842105263
--154.0	1.2256894519480519E7	1.0	1.2256894519480519E7	-0.01948051948051948	-0.007792207792207792
--161.0	NULL	1.0	NULL	-0.018633540372670808	-0.007453416149068323
--164.0	NULL	1.0	NULL	-0.018292682926829267	-0.007317073170731707
--169.0	9738774.01775148	1.0	9738774.01775148	-0.01775147928994083	-0.007100591715976331
--17.0	NULL	1.0	NULL	-0.17647058823529413	-0.07058823529411765
--179.0	NULL	1.0	NULL	-0.01675977653631285	-0.0067039106145251395
--181.0	NULL	1.0	NULL	-0.016574585635359115	-0.0066298342541436465
--183.0	8993731.196721312	1.0	8993731.196721312	-0.01639344262295082	-0.006557377049180328
--184.0	8944852.222826088	1.0	8944852.222826088	-0.016304347826086956	-0.006521739130434782
--188.0	NULL	1.0	NULL	-0.015957446808510637	-0.006382978723404255
--189.0	NULL	1.0	NULL	-0.015873015873015872	-0.006349206349206349
--199.0	NULL	1.0	NULL	-0.01507537688442211	-0.006030150753768844
--20.0	NULL	1.0	NULL	-0.15	-0.06
--201.0	NULL	1.0	NULL	-0.014925373134328358	-0.005970149253731343
--21.0	8.988389314285715E7	1.0	8.988389314285715E7	-0.14285714285714285	-0.05714285714285714
--210.0	-8876320.40952381	1.0	-8876320.40952381	-0.014285714285714285	-0.005714285714285714
--225.0	NULL	1.0	NULL	-0.013333333333333334	-0.005333333333333333
--228.0	8278779.631578947	1.0	8278779.631578947	-0.013157894736842105	-0.005263157894736842
--229.0	7187130.170305677	1.0	7187130.170305677	-0.013100436681222707	-0.005240174672489083
--236.0	NULL	1.0	NULL	-0.012711864406779662	-0.005084745762711864
--237.0	NULL	1.0	NULL	-0.012658227848101266	-0.005063291139240506
--246.0	NULL	1.0	NULL	-0.012195121951219513	-0.004878048780487805
--247.0	-7546669.174089069	1.0	-7546669.174089069	-0.012145748987854251	-0.004858299595141701
--247.0	NULL	1.0	NULL	-0.012145748987854251	-0.004858299595141701
--250.0	6583411.236	1.0	6583411.236	-0.012	-0.0048
--257.0	6404096.53307393	1.0	6404096.53307393	-0.011673151750972763	-0.004669260700389105
--273.0	6028764.868131869	1.0	6028764.868131869	-0.01098901098901099	-0.004395604395604396
--274.0	6888911.518248175	1.0	6888911.518248175	-0.010948905109489052	-0.00437956204379562
--279.0	NULL	1.0	NULL	-0.010752688172043012	-0.004301075268817204
--28.0	5.878045746428572E7	1.0	5.878045746428572E7	-0.10714285714285714	-0.04285714285714286
--28.0	6.741291985714285E7	1.0	6.741291985714285E7	-0.10714285714285714	-0.04285714285714286
--281.0	NULL	1.0	NULL	-0.010676156583629894	-0.004270462633451957
--289.0	NULL	1.0	NULL	-0.010380622837370242	-0.004152249134948096
--290.0	NULL	1.0	NULL	-0.010344827586206896	-0.004137931034482759
--292.0	NULL	1.0	NULL	-0.010273972602739725	-0.00410958904109589
--3.0	NULL	1.0	NULL	-1.0	-0.39999999999999997
--38.0	4.3311916026315786E7	1.0	4.3311916026315786E7	-0.07894736842105263	-0.031578947368421054
--46.0	3.577940889130435E7	1.0	3.577940889130435E7	-0.06521739130434782	-0.02608695652173913
--49.0	3.35888328367347E7	1.0	3.35888328367347E7	-0.061224489795918366	-0.024489795918367346
--57.0	-3.27022330877193E7	1.0	-3.27022330877193E7	-0.05263157894736842	-0.021052631578947368
--60.0	NULL	1.0	NULL	-0.05	-0.02
--62.0	3.0444544451612905E7	1.0	3.0444544451612905E7	-0.04838709677419355	-0.01935483870967742
--62.0	NULL	1.0	NULL	-0.04838709677419355	-0.01935483870967742
--69.0	2.735596747826087E7	1.0	2.735596747826087E7	-0.043478260869565216	-0.017391304347826087
--77.0	2.4513789038961038E7	1.0	2.4513789038961038E7	-0.03896103896103896	-0.015584415584415584
--93.0	NULL	1.0	NULL	-0.03225806451612903	-0.012903225806451613
--94.0	-1.9830077510638297E7	1.0	-1.9830077510638297E7	-0.031914893617021274	-0.01276595744680851
--96.0	NULL	1.0	NULL	-0.03125	-0.012499999999999999
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-0.0	NULL	NULL	NULL	NULL	NULL
-PREHOOK: query: explain vectorization expression
-select cint, cbigint, ctinyint, (cint / (cint - 528534767)) as c1, (cbigint / (cbigint - 1018195815)) as c2, (ctinyint / ctinyint) as c3, (cint % (cint - 528534767)) as c4, (cbigint % (cbigint - 1018195815)), (ctinyint % ctinyint) as c3
-from alltypesorc where cint > 500000000 or cdouble > 1000000000 or ctinyint = 0 order by c1, c2 limit 100
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select cint, cbigint, ctinyint, (cint / (cint - 528534767)) as c1, (cbigint / (cbigint - 1018195815)) as c2, (ctinyint / ctinyint) as c3, (cint % (cint - 528534767)) as c4, (cbigint % (cbigint - 1018195815)), (ctinyint % ctinyint) as c3
-from alltypesorc where cint > 500000000 or cdouble > 1000000000 or ctinyint = 0 order by c1, c2 limit 100
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 220184 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 2:int, val 500000000), FilterDoubleColGreaterDoubleScalar(col 5:double, val 1.0E9), FilterLongColEqualLongScalar(col 0:tinyint, val 0))
-                    predicate: ((cdouble > 1.0E9) or (cint > 500000000) or (ctinyint = 0)) (type: boolean)
-                    Statistics: Num rows: 4193 Data size: 75144 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int), cbigint (type: bigint), ctinyint (type: tinyint), (cint / (cint - 528534767)) (type: double), (cbigint / (cbigint - 1018195815)) (type: double), (ctinyint / ctinyint) (type: double), (cint % (cint - 528534767)) (type: int), (cbigint % (cbigint - 1018195815)) (type: bigint), (ctinyint % ctinyint) (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [2, 3, 0, 14, 15, 16, 17, 18, 13]
-                          selectExpressions: LongColDivideLongColumn(col 2:int, col 13:int)(children: LongColSubtractLongScalar(col 2:int, val 528534767) -> 13:int) -> 14:double, LongColDivideLongColumn(col 3:bigint, col 13:bigint)(children: LongColSubtractLongScalar(col 3:bigint, val 1018195815) -> 13:bigint) -> 15:double, LongColDivideLongColumn(col 0:tinyint, col 0:tinyint) -> 16:double, LongColModuloLongColumn(col 2:int, col 13:int)(children: LongColSubtractLongScalar(col 2:int, val 528534767) -> 13:int) -> 17:int, LongColModuloLongColumn(col 3:bigint, col 13:bigint)(children: LongColSubtractLongScalar(col 3:bigint, val 1018195815) -> 13:bigint) -> 18:bigint, LongColModuloLongColumn(col 0:tinyint, col 0:tinyint) -> 13:tinyint
-                      Statistics: Num rows: 4193 Data size: 217816 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col3 (type: double), _col4 (type: double)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 4193 Data size: 217816 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: tinyint), _col5 (type: double), _col6 (type: int), _col7 (type: bigint), _col8 (type: tinyint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: tinyint), KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: int), VALUE._col5 (type: bigint), VALUE._col6 (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [2, 3, 4, 0, 1, 5, 6, 7, 8]
-                Statistics: Num rows: 4193 Data size: 217816 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 100
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 100 Data size: 5216 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 100 Data size: 5216 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 100
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cint, cbigint, ctinyint, (cint / (cint - 528534767)) as c1, (cbigint / (cbigint - 1018195815)) as c2, (ctinyint / ctinyint) as c3, (cint % (cint - 528534767)) as c4, (cbigint % (cbigint - 1018195815)), (ctinyint % ctinyint) as c3
-from alltypesorc where cint > 500000000 or cdouble > 1000000000 or ctinyint = 0 order by c1, c2 limit 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select cint, cbigint, ctinyint, (cint / (cint - 528534767)) as c1, (cbigint / (cbigint - 1018195815)) as c2, (ctinyint / ctinyint) as c3, (cint % (cint - 528534767)) as c4, (cbigint % (cbigint - 1018195815)), (ctinyint % ctinyint) as c3
-from alltypesorc where cint > 500000000 or cdouble > 1000000000 or ctinyint = 0 order by c1, c2 limit 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-528534767	NULL	-1	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-11	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-11	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-11	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-12	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-13	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-16	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-16	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-19	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-21	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-21	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-22	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-22	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-22	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-23	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-23	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-23	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-24	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-28	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-28	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-30	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-32	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-33	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-33	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-34	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-34	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-36	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-37	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-4	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-4	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-40	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-43	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-44	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-45	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-45	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-47	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-48	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-48	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-5	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-5	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-5	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-50	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-51	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-53	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-54	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-55	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-55	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-56	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-56	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-57	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-59	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-62	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	-7	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	0	NULL	NULL	NULL	NULL	NULL	NULL
-528534767	NULL	0	NULL	NULL	NULL	NULL	NULL	NULL
-528534767	NULL	10	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	13	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	16	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	18	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	19	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	2	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	21	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	24	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	24	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	26	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	27	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	28	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	29	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	29	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	30	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	31	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	31	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	33	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	34	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	34	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	36	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	36	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	38	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	38	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	38	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	39	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	4	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	40	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	40	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	41	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	43	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	46	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	5	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	51	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	51	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	53	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	53	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	61	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	61	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	61	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	62	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	9	NULL	NULL	1.0	NULL	NULL	0
-528534767	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
-528534767	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
-NULL	1018195815	0	NULL	NULL	NULL	NULL	NULL	NULL
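
Note on the results above: every row whose divisor expression evaluates to zero (cint = 528534767, or ctinyint = 0) yields NULL, since Hive returns NULL for division and modulo by zero instead of raising an error. A minimal sketch of the same behavior; the query below is illustrative and not part of the removed output:

-- Division and modulo by zero return NULL in Hive rather than failing the query.
SELECT 1 / 0,                                   -- NULL
       1 % 0,                                   -- NULL
       CAST(0 AS TINYINT) / CAST(0 AS TINYINT); -- NULL, matching the NULL c3 rows above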


[7/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/99380fbd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/99380fbd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/99380fbd

Branch: refs/heads/master
Commit: 99380fbd2682df2c02f375b96cd272dd7d913fde
Parents: 987d5ff
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Tue Feb 27 07:47:07 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Feb 27 07:47:07 2018 +0100

----------------------------------------------------------------------
 .../alter_concatenate_indexed_table.q.out       |   80 --
 .../authorization_create_index.q.out            |    9 -
 .../authorization_drop_index.q.out              |   16 -
 .../authorization_uri_index.q.out               |    9 -
 .../results/clientnegative/bad_indextype.q.out  |    1 -
 .../clientnegative/drop_index_failure.q.out     |    1 -
 .../clientnegative/merge_negative_1.q.out       |   16 -
 .../show_create_table_index.q.out               |   16 -
 .../clientnegative/temp_table_index.q.out       |   12 -
 .../truncate_column_indexed_table.q.out         |   26 -
 .../alter_concatenate_indexed_table.q.out       |  271 ----
 .../results/clientpositive/alter_index.q.out    |   67 -
 .../clientpositive/authorization_index.q.out    |   62 -
 .../results/clientpositive/drop_index.q.out     |   10 -
 .../drop_index_removes_partition_dirs.q.out     |   47 -
 .../clientpositive/drop_table_with_index.q.out  |  153 ---
 .../clientpositive/llap/vector_const.q.out      |   66 -
 .../llap/vector_empty_where.q.out               |  652 ---------
 .../llap/vector_gather_stats.q.out              |  108 --
 .../clientpositive/llap/vector_join.q.out       |  104 --
 .../llap/vector_join_part_col_char.q.out        |  175 ---
 .../llap/vector_non_constant_in_expr.q.out      |   51 -
 .../llap/vector_non_string_partition.q.out      |  274 ----
 .../vector_orc_string_reader_empty_dict.q.out   |   62 -
 .../llap/vector_outer_join_no_keys.q.out        |  408 ------
 .../llap/vector_tablesample_rows.q.out          |  400 ------
 .../llap/vectorization_limit.q.out              |  943 -------------
 .../llap/vectorization_parquet_projection.q.out |  684 ----------
 .../llap/vectorized_mapjoin2.q.out              |  214 ---
 .../show_indexes_edge_cases.q.out               |  175 ---
 .../clientpositive/show_indexes_syntax.q.out    |  117 --
 .../spark/index_auto_self_join.q.out            |  225 ----
 .../clientpositive/spark/index_bitmap3.q.out    | 1260 -----------------
 .../spark/index_bitmap_auto.q.out               | 1271 ------------------
 .../results/clientpositive/udf_bitmap_and.q.out |   68 -
 .../clientpositive/udf_bitmap_empty.q.out       |   18 -
 .../results/clientpositive/udf_bitmap_or.q.out  |   68 -
 .../clientpositive/vector_string_decimal.q.out  |  137 --
 .../clientpositive/vector_udf_adaptor_1.q.out   |  192 ---
 .../vector_udf_string_to_boolean.q.out          |  189 ---
 .../clientpositive/vectorization_div0.q.out     |  831 ------------
 41 files changed, 9488 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out b/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
deleted file mode 100644
index 19fbf51..0000000
--- a/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
+++ /dev/null
@@ -1,80 +0,0 @@
-PREHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: show table extended like `src_rc_concatenate_test`
-PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `src_rc_concatenate_test`
-POSTHOOK: type: SHOW_TABLESTATUS
-tableName:src_rc_concatenate_test
-#### A masked pattern was here ####
-inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-columns:struct columns { i32 key, string value}
-partitioned:false
-partitionColumns:
-totalNumberFiles:3
-totalFileSize:636
-maxFileSize:222
-minFileSize:206
-#### A masked pattern was here ####
-
-PREHOOK: query: select count(1) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-15
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-214	-7678496319
-PREHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: Output: default@default__src_rc_concatenate_test_src_rc_concatenate_test_index__
-PREHOOK: query: show indexes on src_rc_concatenate_test
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show indexes on src_rc_concatenate_test
-POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table default.src_rc_concatenate_test is indexed.
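
For reference, the SemanticException above is raised when ALTER TABLE ... CONCATENATE targets an indexed table; a minimal sketch of the triggering sequence, using the same DDL as the removed test:

create table src_rc_concatenate_test (key int, value string) stored as rcfile;

create index src_rc_concatenate_test_index
on table src_rc_concatenate_test (key) as 'compact'
WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");

-- Rejected while the index exists:
alter table src_rc_concatenate_test concatenate;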

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/authorization_create_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_create_index.q.out b/ql/src/test/results/clientnegative/authorization_create_index.q.out
deleted file mode 100644
index 0e1f41e..0000000
--- a/ql/src/test/results/clientnegative/authorization_create_index.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table t1 (a int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation CREATEINDEX [[OBJECT OWNERSHIP] on Object [type=TABLE_OR_VIEW, name=default.t1]]

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/authorization_drop_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_drop_index.q.out b/ql/src/test/results/clientnegative/authorization_drop_index.q.out
deleted file mode 100644
index 72d782d..0000000
--- a/ql/src/test/results/clientnegative/authorization_drop_index.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-PREHOOK: query: create table t1 (a int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@t1
-POSTHOOK: query: create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@default__t1_t1_index__
-FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPINDEX [[OBJECT OWNERSHIP] on Object [type=TABLE_OR_VIEW, name=default.t1]]

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/authorization_uri_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_uri_index.q.out b/ql/src/test/results/clientnegative/authorization_uri_index.q.out
deleted file mode 100644
index 97b81b1..0000000
--- a/ql/src/test/results/clientnegative/authorization_uri_index.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-PREHOOK: query: create table t1(i int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/bad_indextype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/bad_indextype.q.out b/ql/src/test/results/clientnegative/bad_indextype.q.out
deleted file mode 100644
index 1ec59a7..0000000
--- a/ql/src/test/results/clientnegative/bad_indextype.q.out
+++ /dev/null
@@ -1 +0,0 @@
-FAILED: SemanticException class name provided for index handler not found.

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/drop_index_failure.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_index_failure.q.out b/ql/src/test/results/clientnegative/drop_index_failure.q.out
deleted file mode 100644
index f64ff5b..0000000
--- a/ql/src/test/results/clientnegative/drop_index_failure.q.out
+++ /dev/null
@@ -1 +0,0 @@
-FAILED: SemanticException [Error 10003]: Invalid index UnknownIndex

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/merge_negative_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/merge_negative_1.q.out b/ql/src/test/results/clientnegative/merge_negative_1.q.out
deleted file mode 100644
index 039a953..0000000
--- a/ql/src/test/results/clientnegative/merge_negative_1.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-PREHOOK: query: create table src2 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src2
-POSTHOOK: query: create table src2 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src2
-PREHOOK: query: CREATE INDEX src_index_merge_test ON TABLE src2(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src2
-POSTHOOK: query: CREATE INDEX src_index_merge_test ON TABLE src2(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src2
-POSTHOOK: Output: default@default__src2_src_index_merge_test__
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table default.src2 is indexed.

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/show_create_table_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/show_create_table_index.q.out b/ql/src/test/results/clientnegative/show_create_table_index.q.out
deleted file mode 100644
index 7f03efd..0000000
--- a/ql/src/test/results/clientnegative/show_create_table_index.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-PREHOOK: query: CREATE TABLE tmp_showcrt (key int, value string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp_showcrt
-POSTHOOK: query: CREATE TABLE tmp_showcrt (key int, value string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp_showcrt
-PREHOOK: query: CREATE INDEX tmp_index on table tmp_showcrt(key) as 'compact' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@tmp_showcrt
-POSTHOOK: query: CREATE INDEX tmp_index on table tmp_showcrt(key) as 'compact' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@tmp_showcrt
-POSTHOOK: Output: default@default__tmp_showcrt_tmp_index__
-FAILED: SemanticException [Error 10144]: SHOW CREATE TABLE does not support tables of type INDEX_TABLE. default__tmp_showcrt_tmp_index__ has table type INDEX_TABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/temp_table_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/temp_table_index.q.out b/ql/src/test/results/clientnegative/temp_table_index.q.out
deleted file mode 100644
index 643d592..0000000
--- a/ql/src/test/results/clientnegative/temp_table_index.q.out
+++ /dev/null
@@ -1,12 +0,0 @@
-PREHOOK: query: create temporary table tmp1 (c1 string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp1
-POSTHOOK: query: create temporary table tmp1 (c1 string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp1
-PREHOOK: query: create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@tmp1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: tableName=default.tmp1 is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.
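
The failure above is the whole point of the removed test: index DDL was rejected on temporary tables. A two-statement sketch of the same check:

create temporary table tmp1 (c1 string);
-- Fails: Index on TEMPORARY TABLE is not supported.
create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild;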

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out b/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out
deleted file mode 100644
index 451fdba..0000000
--- a/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out
+++ /dev/null
@@ -1,26 +0,0 @@
-PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab
-POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX test_tab_index ON TABLE test_tab (key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: CREATE INDEX test_tab_index ON TABLE test_tab (key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@default__test_tab_test_tab_index__
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Can not truncate columns from table with indexes
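
The statement that triggers the final SemanticException is not echoed in the output; given the test name, it would have been a column truncate along these lines (hedged reconstruction, assuming the TRUNCATE ... COLUMNS syntax of that Hive era):

-- Hypothetical triggering statement; rejected while test_tab_index exists.
TRUNCATE TABLE test_tab COLUMNS (value);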

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out b/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
deleted file mode 100644
index ffcbcf9..0000000
--- a/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
+++ /dev/null
@@ -1,271 +0,0 @@
-PREHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: show table extended like `src_rc_concatenate_test`
-PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `src_rc_concatenate_test`
-POSTHOOK: type: SHOW_TABLESTATUS
-tableName:src_rc_concatenate_test
-#### A masked pattern was here ####
-inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-columns:struct columns { i32 key, string value}
-partitioned:false
-partitionColumns:
-totalNumberFiles:3
-totalFileSize:636
-maxFileSize:222
-minFileSize:206
-#### A masked pattern was here ####
-
-PREHOOK: query: select count(1) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-15
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-214	-7678496319
-PREHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: Output: default@default__src_rc_concatenate_test_src_rc_concatenate_test_index__
-PREHOOK: query: show indexes on src_rc_concatenate_test
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show indexes on src_rc_concatenate_test
-POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
-PREHOOK: query: alter table src_rc_concatenate_test concatenate
-PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@src_rc_concatenate_test
-PREHOOK: Output: default@src_rc_concatenate_test
-POSTHOOK: query: alter table src_rc_concatenate_test concatenate
-POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: Output: default@src_rc_concatenate_test
-PREHOOK: query: show table extended like `src_rc_concatenate_test`
-PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `src_rc_concatenate_test`
-POSTHOOK: type: SHOW_TABLESTATUS
-tableName:src_rc_concatenate_test
-#### A masked pattern was here ####
-inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-columns:struct columns { i32 key, string value}
-partitioned:false
-partitionColumns:
-totalNumberFiles:1
-totalFileSize:239
-maxFileSize:239
-minFileSize:239
-#### A masked pattern was here ####
-
-PREHOOK: query: select count(1) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-15
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test
-#### A masked pattern was here ####
-214	-7678496319
-PREHOOK: query: drop index src_rc_concatenate_test_index on src_rc_concatenate_test
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src_rc_concatenate_test
-POSTHOOK: query: drop index src_rc_concatenate_test_index on src_rc_concatenate_test
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src_rc_concatenate_test
-PREHOOK: query: create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_rc_concatenate_test_part
-POSTHOOK: query: create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_rc_concatenate_test_part
-PREHOOK: query: alter table src_rc_concatenate_test_part add partition (ds='2011')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@src_rc_concatenate_test_part
-POSTHOOK: query: alter table src_rc_concatenate_test_part add partition (ds='2011')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@src_rc_concatenate_test_part
-POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-PREHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
-PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
-POSTHOOK: type: SHOW_TABLESTATUS
-tableName:src_rc_concatenate_test_part
-#### A masked pattern was here ####
-inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-columns:struct columns { i32 key, string value}
-partitioned:true
-partitionColumns:struct partition_columns { string ds}
-totalNumberFiles:3
-totalFileSize:636
-maxFileSize:222
-minFileSize:206
-#### A masked pattern was here ####
-
-PREHOOK: query: select count(1) from src_rc_concatenate_test_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test_part
-PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from src_rc_concatenate_test_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-15
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test_part
-PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-214	-7678496319
-PREHOOK: query: create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: query: create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Output: default@default__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__
-PREHOOK: query: show indexes on src_rc_concatenate_test_part
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show indexes on src_rc_concatenate_test_part
-POSTHOOK: type: SHOWINDEXES
-src_rc_concatenate_test_part_index	src_rc_concatenate_test_part	key                 	default__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__	compact             	
-PREHOOK: query: alter table src_rc_concatenate_test_part partition (ds='2011') concatenate
-PREHOOK: type: ALTER_PARTITION_MERGE
-PREHOOK: Input: default@src_rc_concatenate_test_part
-PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-POSTHOOK: query: alter table src_rc_concatenate_test_part partition (ds='2011') concatenate
-POSTHOOK: type: ALTER_PARTITION_MERGE
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
-PREHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
-PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
-POSTHOOK: type: SHOW_TABLESTATUS
-tableName:src_rc_concatenate_test_part
-#### A masked pattern was here ####
-inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-columns:struct columns { i32 key, string value}
-partitioned:true
-partitionColumns:struct partition_columns { string ds}
-totalNumberFiles:1
-totalFileSize:239
-maxFileSize:239
-minFileSize:239
-#### A masked pattern was here ####
-
-PREHOOK: query: select count(1) from src_rc_concatenate_test_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test_part
-PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from src_rc_concatenate_test_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-15
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_concatenate_test_part
-PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
-#### A masked pattern was here ####
-214	-7678496319
-PREHOOK: query: drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src_rc_concatenate_test_part
-POSTHOOK: query: drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src_rc_concatenate_test_part
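
Taken together, the removed positive test asserted that concatenate shrinks the file count without altering data; the before/after invariants it checked can be condensed as:

-- show table extended: totalNumberFiles 3 -> 1, for both the unpartitioned table and the ds=2011 partition
select count(1) from src_rc_concatenate_test;                          -- 15 before and after
select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;  -- 214  -7678496319 before and after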

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/alter_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_index.q.out b/ql/src/test/results/clientpositive/alter_index.q.out
deleted file mode 100644
index 262ad07..0000000
--- a/ql/src/test/results/clientpositive/alter_index.q.out
+++ /dev/null
@@ -1,67 +0,0 @@
-PREHOOK: query: drop index src_index_8 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_8 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_8__
-PREHOOK: query: desc extended default__src_src_index_8__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_8__
-POSTHOOK: query: desc extended default__src_src_index_8__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_8__
-key                 	string              	default             
-_bucketname         	string              	                    
-_offsets            	array<bigint>       	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3")
-PREHOOK: type: ALTERINDEX_PROPS
-POSTHOOK: query: alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3")
-POSTHOOK: type: ALTERINDEX_PROPS
-PREHOOK: query: desc extended default__src_src_index_8__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_8__
-POSTHOOK: query: desc extended default__src_src_index_8__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_8__
-key                 	string              	default             
-_bucketname         	string              	                    
-_offsets            	array<bigint>       	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: drop index src_index_8 on default.src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_8 on default.src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: show tables
-PREHOOK: type: SHOWTABLES
-PREHOOK: Input: database:default
-POSTHOOK: query: show tables
-POSTHOOK: type: SHOWTABLES
-POSTHOOK: Input: database:default
-alltypesorc
-alltypesparquet
-cbo_t1
-cbo_t2
-cbo_t3
-lineitem
-part
-src
-src1
-src_cbo
-src_json
-src_sequencefile
-src_thrift
-srcbucket
-srcbucket2
-srcpart
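
The removed test exercised ALTER INDEX ... SET IDXPROPERTIES; the sequence it covered, condensed:

create index src_index_8 on table default.src (key) as 'compact'
WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");

-- Replaces prop1 and adds prop3 on the index metadata:
alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3");

drop index src_index_8 on default.src;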

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/authorization_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_index.q.out b/ql/src/test/results/clientpositive/authorization_index.q.out
deleted file mode 100644
index 87486b3..0000000
--- a/ql/src/test/results/clientpositive/authorization_index.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-PREHOOK: query: create table t1 (a int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@t1
-POSTHOOK: query: create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@default__t1_t1_index__
-PREHOOK: query: desc formatted default__t1_t1_index__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__t1_t1_index__
-POSTHOOK: query: desc formatted default__t1_t1_index__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__t1_t1_index__
-# col_name            	data_type           	comment             
-a                   	int                 	                    
-_bucketname         	string              	                    
-_offsets            	array<bigint>       	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	INDEX_TABLE         	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[Order(col:a, order:1)]	 
-PREHOOK: query: alter index t1_index on t1 rebuild
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@default__t1_t1_index__
-POSTHOOK: query: alter index t1_index on t1 rebuild
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@default__t1_t1_index__
-POSTHOOK: Lineage: default__t1_t1_index__._bucketname SIMPLE [(t1)t1.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__t1_t1_index__._offsets EXPRESSION [(t1)t1.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__t1_t1_index__.a SIMPLE [(t1)t1.FieldSchema(name:a, type:int, comment:null), ]
-PREHOOK: query: drop table t1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
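
As the DESC FORMATTED output above shows, a compact index materializes as a hidden INDEX_TABLE (default__t1_t1_index__) holding the indexed column plus _bucketname and _offsets, and ALTER INDEX ... REBUILD populates it from the base table. A sketch of probing that table after the rebuild; the SELECT is illustrative and not part of the removed test:

alter index t1_index on t1 rebuild;
select a, _bucketname, _offsets from default__t1_t1_index__ limit 10;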

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/drop_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_index.q.out b/ql/src/test/results/clientpositive/drop_index.q.out
deleted file mode 100644
index dc154d3..0000000
--- a/ql/src/test/results/clientpositive/drop_index.q.out
+++ /dev/null
@@ -1,10 +0,0 @@
-PREHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON UnknownTable
-PREHOOK: type: DROPINDEX
-POSTHOOK: query: DROP INDEX IF EXISTS UnknownIndex ON UnknownTable
-POSTHOOK: type: DROPINDEX

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out
deleted file mode 100644
index bbd86b4..0000000
--- a/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out
+++ /dev/null
@@ -1,47 +0,0 @@
-PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
-PARTITIONED BY (part STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-PREHOOK: type: CREATETABLE
-#### A masked pattern was here ####
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
-PARTITIONED BY (part STRING)
-STORED AS RCFILE
-#### A masked pattern was here ####
-POSTHOOK: type: CREATETABLE
-#### A masked pattern was here ####
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: CREATE INDEX test_index ON 
-TABLE test_table(key) AS 'compact' WITH DEFERRED REBUILD
-IN TABLE test_index_table
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@test_table
-POSTHOOK: query: CREATE INDEX test_index ON 
-TABLE test_table(key) AS 'compact' WITH DEFERRED REBUILD
-IN TABLE test_index_table
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@test_table
-POSTHOOK: Output: default@test_index_table
-PREHOOK: query: ALTER TABLE test_index_table ADD PARTITION (part = '1')
-#### A masked pattern was here ####
-PREHOOK: type: ALTERTABLE_ADDPARTS
-#### A masked pattern was here ####
-PREHOOK: Output: default@test_index_table
-POSTHOOK: query: ALTER TABLE test_index_table ADD PARTITION (part = '1')
-#### A masked pattern was here ####
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-#### A masked pattern was here ####
-POSTHOOK: Output: default@test_index_table
-POSTHOOK: Output: default@test_index_table@part=1
-Found 1 items
-#### A masked pattern was here ####
-PREHOOK: query: DROP INDEX test_index ON test_table
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DROP INDEX test_index ON test_table
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@test_table
-#### A masked pattern was here ####
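
The IN TABLE clause above names the index table explicitly, which is what let the removed test add a partition to it and then verify that DROP INDEX removed the partition directories (the masked lines are filesystem checks). Condensed:

create index test_index on table test_table (key) as 'compact'
WITH DEFERRED REBUILD in table test_index_table;

alter table test_index_table add partition (part = '1');

-- Drops test_index_table as well; per the test's intent, its partition dirs go too.
drop index test_index on test_table;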

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/drop_table_with_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_table_with_index.q.out b/ql/src/test/results/clientpositive/drop_table_with_index.q.out
deleted file mode 100644
index 2312eee..0000000
--- a/ql/src/test/results/clientpositive/drop_table_with_index.q.out
+++ /dev/null
@@ -1,153 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS aa
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS aa
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE aa (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@aa
-POSTHOOK: query: CREATE TABLE aa (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@aa
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@aa
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@aa
-PREHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@aa
-POSTHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@aa
-POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
-PREHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@aa
-PREHOOK: Output: default@default__aa_aa_lshipdate_idx__
-POSTHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@aa
-POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
-POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._bucketname SIMPLE [(aa)aa.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._count_of_l_shipdate EXPRESSION [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
-POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._offsets EXPRESSION [(aa)aa.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__.l_shipdate SIMPLE [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
-PREHOOK: query: show tables
-PREHOOK: type: SHOWTABLES
-PREHOOK: Input: database:default
-POSTHOOK: query: show tables
-POSTHOOK: type: SHOWTABLES
-POSTHOOK: Input: database:default
-aa
-alltypesorc
-alltypesparquet
-cbo_t1
-cbo_t2
-cbo_t3
-default__aa_aa_lshipdate_idx__
-lineitem
-part
-src
-src1
-src_cbo
-src_json
-src_sequencefile
-src_thrift
-srcbucket
-srcbucket2
-srcpart
-PREHOOK: query: explain select l_shipdate, count(l_shipdate)
-from aa
-group by l_shipdate
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select l_shipdate, count(l_shipdate)
-from aa
-group by l_shipdate
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: aa
-            Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: l_shipdate (type: string)
-              outputColumnNames: l_shipdate
-              Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(l_shipdate)
-                keys: l_shipdate (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 120990 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
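
The removed test built an aggregate index whose table carries a precomputed _count_of_l_shipdate column (see the lineage lines above), intended to answer count(l_shipdate) group-bys without scanning aa; the explain shown still scans the base table, so the rewrite was not applied here. The DDL in question, condensed:

create index aa_lshipdate_idx on table aa (l_shipdate)
AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler'
WITH DEFERRED REBUILD IDXPROPERTIES ("AGGREGATES"="count(l_shipdate)");

alter index aa_lshipdate_idx on aa rebuild;
-- Index table columns: l_shipdate, _bucketname, _offsets, _count_of_l_shipdate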

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_const.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_const.q.out b/ql/src/test/results/clientpositive/llap/vector_const.q.out
deleted file mode 100644
index 964ddcc..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_const.q.out
+++ /dev/null
@@ -1,66 +0,0 @@
-PREHOOK: query: CREATE TEMPORARY TABLE varchar_const_1 (c1 int) STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_const_1
-POSTHOOK: query: CREATE TEMPORARY TABLE varchar_const_1 (c1 int) STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_const_1
-PREHOOK: query: INSERT INTO varchar_const_1 values(42)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@varchar_const_1
-POSTHOOK: query: INSERT INTO varchar_const_1 values(42)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@varchar_const_1
-POSTHOOK: Lineage: varchar_const_1.c1 SCRIPT []
-PREHOOK: query: EXPLAIN
-SELECT CONCAT(CAST('F' AS CHAR(2)), CAST('F' AS VARCHAR(2))) FROM VARCHAR_CONST_1
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT CONCAT(CAST('F' AS CHAR(2)), CAST('F' AS VARCHAR(2))) FROM VARCHAR_CONST_1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: varchar_const_1
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: 'FF' (type: varchar(4))
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT CONCAT(CAST('F' AS CHAR(2)), CAST('F' AS VARCHAR(2))) FROM VARCHAR_CONST_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_const_1
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT CONCAT(CAST('F' AS CHAR(2)), CAST('F' AS VARCHAR(2))) FROM VARCHAR_CONST_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_const_1
-#### A masked pattern was here ####
-FF
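
Worth noting in the removed plan: the whole expression is constant-folded to 'FF' with type varchar(4), meaning the trailing pad that CAST('F' AS CHAR(2)) logically carries does not survive CONCAT:

SELECT CONCAT(CAST('F' AS CHAR(2)), CAST('F' AS VARCHAR(2)));  -- 'FF', not 'F F'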

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out b/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
deleted file mode 100644
index 494c5c9..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
+++ /dev/null
@@ -1,652 +0,0 @@
-PREHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cstring1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cstring1
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 899146 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsTrue(col 13:boolean)(children: CastStringToBoolean(col 6) -> 13:boolean)
-                    predicate: cstring1 (type: string)
-                    Statistics: Num rows: 6144 Data size: 449620 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int)
-                      outputColumnNames: cint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [2]
-                      Statistics: Num rows: 6144 Data size: 449620 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        Group By Vectorization:
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 2:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: []
-                        keys: cint (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0:int) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: HASH
-                      native: false
-                      vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0]
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkEmptyKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count (distinct cint) from alltypesorc where cstring1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select count (distinct cint) from alltypesorc where cstring1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-6041
-PREHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cint
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cint
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsTrue(col 13:boolean)(children: CastLongToBooleanViaLongToLong(col 2:int) -> 13:boolean)
-                    predicate: cint (type: int)
-                    Statistics: Num rows: 6144 Data size: 18348 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 2:int
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: cint (type: int)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0:int) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: HASH
-                      native: false
-                      vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0]
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkEmptyKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count (distinct cint) from alltypesorc where cint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select count (distinct cint) from alltypesorc where cint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-6082
-PREHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cfloat
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where cfloat
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsTrue(col 13:boolean)(children: CastDoubleToBooleanViaDoubleToLong(col 4:float) -> 13:boolean)
-                    predicate: cfloat (type: float)
-                    Statistics: Num rows: 6144 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int)
-                      outputColumnNames: cint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [2]
-                      Statistics: Num rows: 6144 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        Group By Vectorization:
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 2:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: []
-                        keys: cint (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0:int) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: HASH
-                      native: false
-                      vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0]
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkEmptyKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count (distinct cint) from alltypesorc where cfloat
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select count (distinct cint) from alltypesorc where cfloat
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-3022
-PREHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where ctimestamp1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count (distinct cint) from alltypesorc where ctimestamp1
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 528216 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsTrue(col 13:boolean)(children: CastTimestampToBoolean(col 8:timestamp) -> 13:boolean)
-                    predicate: ctimestamp1 (type: timestamp)
-                    Statistics: Num rows: 6144 Data size: 264108 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int)
-                      outputColumnNames: cint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [2]
-                      Statistics: Num rows: 6144 Data size: 264108 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        Group By Vectorization:
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            keyExpressions: col 2:int
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: []
-                        keys: cint (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkLongOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 3016 Data size: 9008 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 0:int) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: HASH
-                      native: false
-                      vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0]
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkEmptyKeyOperator
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count (distinct cint) from alltypesorc where ctimestamp1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select count (distinct cint) from alltypesorc where ctimestamp1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-3022

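The deleted vector_empty_where.q.out covered WHERE clauses whose predicate is a
bare non-boolean column: Hive implicitly casts the column to boolean, so rows
where the value is NULL (or, for numeric columns, zero) are filtered out, and
the vectorizer expresses this as SelectColumnIsTrue over a Cast...ToBoolean
child expression, as each plan above shows for string, int, float, and
timestamp columns. A minimal sketch of the pattern (assuming the standard
alltypesorc test table):

    -- cint is implicitly cast to boolean; NULL and 0 rows are dropped
    SELECT count(DISTINCT cint) FROM alltypesorc WHERE cint;
    -- equivalent explicit form of the same predicate
    SELECT count(DISTINCT cint) FROM alltypesorc WHERE CAST(cint AS boolean);
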

[4/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out b/ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out
deleted file mode 100644
index cc6a405..0000000
--- a/ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out
+++ /dev/null
@@ -1,175 +0,0 @@
-PREHOOK: query: DROP TABLE show_idx_empty
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE show_idx_empty
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE show_idx_full
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE show_idx_full
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE show_idx_empty(KEY STRING, VALUE STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@show_idx_empty
-POSTHOOK: query: CREATE TABLE show_idx_empty(KEY STRING, VALUE STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@show_idx_empty
-PREHOOK: query: CREATE TABLE show_idx_full(KEY STRING, VALUE1 STRING, VALUE2 STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@show_idx_full
-POSTHOOK: query: CREATE TABLE show_idx_full(KEY STRING, VALUE1 STRING, VALUE2 STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@show_idx_full
-PREHOOK: query: CREATE INDEX idx_1 ON TABLE show_idx_full(KEY) AS "COMPACT" WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: CREATE INDEX idx_1 ON TABLE show_idx_full(KEY) AS "COMPACT" WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_1__
-PREHOOK: query: CREATE INDEX idx_2 ON TABLE show_idx_full(VALUE1) AS "COMPACT" WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: CREATE INDEX idx_2 ON TABLE show_idx_full(VALUE1) AS "COMPACT" WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_2__
-PREHOOK: query: CREATE INDEX idx_comment ON TABLE show_idx_full(VALUE2) AS "COMPACT" WITH DEFERRED REBUILD COMMENT "index comment"
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: CREATE INDEX idx_comment ON TABLE show_idx_full(VALUE2) AS "COMPACT" WITH DEFERRED REBUILD COMMENT "index comment"
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_comment__
-PREHOOK: query: CREATE INDEX idx_compound ON TABLE show_idx_full(KEY, VALUE1) AS "COMPACT" WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: CREATE INDEX idx_compound ON TABLE show_idx_full(KEY, VALUE1) AS "COMPACT" WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_compound__
-PREHOOK: query: ALTER INDEX idx_1 ON show_idx_full REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@show_idx_full
-PREHOOK: Output: default@default__show_idx_full_idx_1__
-POSTHOOK: query: ALTER INDEX idx_1 ON show_idx_full REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_1__
-POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: ALTER INDEX idx_2 ON show_idx_full REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@show_idx_full
-PREHOOK: Output: default@default__show_idx_full_idx_2__
-POSTHOOK: query: ALTER INDEX idx_2 ON show_idx_full REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_2__
-POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ]
-PREHOOK: query: ALTER INDEX idx_comment ON show_idx_full REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@show_idx_full
-PREHOOK: Output: default@default__show_idx_full_idx_comment__
-POSTHOOK: query: ALTER INDEX idx_comment ON show_idx_full REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_comment__
-POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ]
-PREHOOK: query: ALTER INDEX idx_compound ON show_idx_full REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@show_idx_full
-PREHOOK: Output: default@default__show_idx_full_idx_compound__
-POSTHOOK: query: ALTER INDEX idx_compound ON show_idx_full REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@default__show_idx_full_idx_compound__
-POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN SHOW INDEXES ON show_idx_full
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: EXPLAIN SHOW INDEXES ON show_idx_full
-POSTHOOK: type: SHOWINDEXES
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-      Show Index Operator:
-        Show Indexes
-
-  Stage: Stage-1
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SHOW INDEXES ON show_idx_full
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW INDEXES ON show_idx_full
-POSTHOOK: type: SHOWINDEXES
-idx_1               	show_idx_full       	key                 	default__show_idx_full_idx_1__	compact             	
-idx_2               	show_idx_full       	value1              	default__show_idx_full_idx_2__	compact             	
-idx_comment         	show_idx_full       	value2              	default__show_idx_full_idx_comment__	compact             	index comment       
-idx_compound        	show_idx_full       	key, value1         	default__show_idx_full_idx_compound__	compact             	
-PREHOOK: query: EXPLAIN SHOW INDEXES ON show_idx_empty
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: EXPLAIN SHOW INDEXES ON show_idx_empty
-POSTHOOK: type: SHOWINDEXES
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-      Show Index Operator:
-        Show Indexes
-
-  Stage: Stage-1
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SHOW INDEXES ON show_idx_empty
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW INDEXES ON show_idx_empty
-POSTHOOK: type: SHOWINDEXES
-PREHOOK: query: DROP INDEX idx_1 on show_idx_full
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: DROP INDEX idx_1 on show_idx_full
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@show_idx_full
-PREHOOK: query: DROP INDEX idx_2 on show_idx_full
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@show_idx_full
-POSTHOOK: query: DROP INDEX idx_2 on show_idx_full
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@show_idx_full
-PREHOOK: query: DROP TABLE show_idx_empty
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@show_idx_empty
-PREHOOK: Output: default@show_idx_empty
-POSTHOOK: query: DROP TABLE show_idx_empty
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@show_idx_empty
-POSTHOOK: Output: default@show_idx_empty
-PREHOOK: query: DROP TABLE show_idx_full
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@show_idx_full
-PREHOOK: Output: default@show_idx_full
-POSTHOOK: query: DROP TABLE show_idx_full
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@show_idx_full
-POSTHOOK: Output: default@show_idx_full

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/show_indexes_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_indexes_syntax.q.out b/ql/src/test/results/clientpositive/show_indexes_syntax.q.out
deleted file mode 100644
index bed97f0..0000000
--- a/ql/src/test/results/clientpositive/show_indexes_syntax.q.out
+++ /dev/null
@@ -1,117 +0,0 @@
-PREHOOK: query: DROP TABLE show_idx_t1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE show_idx_t1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE show_idx_t1(KEY STRING, VALUE STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@show_idx_t1
-POSTHOOK: query: CREATE TABLE show_idx_t1(KEY STRING, VALUE STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@show_idx_t1
-PREHOOK: query: CREATE INDEX idx_t1 ON TABLE show_idx_t1(KEY) AS "COMPACT" WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@show_idx_t1
-POSTHOOK: query: CREATE INDEX idx_t1 ON TABLE show_idx_t1(KEY) AS "COMPACT" WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@show_idx_t1
-POSTHOOK: Output: default@default__show_idx_t1_idx_t1__
-PREHOOK: query: ALTER INDEX idx_t1 ON show_idx_t1 REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@show_idx_t1
-PREHOOK: Output: default@default__show_idx_t1_idx_t1__
-POSTHOOK: query: ALTER INDEX idx_t1 ON show_idx_t1 REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@show_idx_t1
-POSTHOOK: Output: default@default__show_idx_t1_idx_t1__
-POSTHOOK: Lineage: default__show_idx_t1_idx_t1__._bucketname SIMPLE [(show_idx_t1)show_idx_t1.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__show_idx_t1_idx_t1__._offsets EXPRESSION [(show_idx_t1)show_idx_t1.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__show_idx_t1_idx_t1__.key SIMPLE [(show_idx_t1)show_idx_t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN
-SHOW INDEX ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: EXPLAIN
-SHOW INDEX ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-      Show Index Operator:
-        Show Indexes
-
-  Stage: Stage-1
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SHOW INDEX ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW INDEX ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-idx_t1              	show_idx_t1         	key                 	default__show_idx_t1_idx_t1__	compact             	
-PREHOOK: query: EXPLAIN
-SHOW INDEXES ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: EXPLAIN
-SHOW INDEXES ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-      Show Index Operator:
-        Show Indexes
-
-  Stage: Stage-1
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SHOW INDEXES ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW INDEXES ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-idx_t1              	show_idx_t1         	key                 	default__show_idx_t1_idx_t1__	compact             	
-PREHOOK: query: EXPLAIN
-SHOW FORMATTED INDEXES ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: EXPLAIN
-SHOW FORMATTED INDEXES ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-      Show Index Operator:
-        Show Indexes
-
-  Stage: Stage-1
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SHOW FORMATTED INDEXES ON show_idx_t1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW FORMATTED INDEXES ON show_idx_t1
-POSTHOOK: type: SHOWINDEXES
-idx_name            	tab_name            	col_names           	idx_tab_name        	idx_type            	comment             
-idx_t1              	show_idx_t1         	key                 	default__show_idx_t1_idx_t1__	compact             	
-PREHOOK: query: DROP TABLE show_idx_t1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@show_idx_t1
-PREHOOK: Output: default@show_idx_t1
-POSTHOOK: query: DROP TABLE show_idx_t1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@show_idx_t1
-POSTHOOK: Output: default@show_idx_t1

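The two show_indexes q.out files removed above exercised the legacy Hive index
DDL. Both follow the same lifecycle, visible in the deleted output: an index is
created with deferred rebuild (metadata only), populated with ALTER INDEX ...
REBUILD into a generated default__<table>_<index>__ table, listed with any of
the three accepted syntax variants, and finally dropped. A condensed sketch,
drawn from the statements above:

    CREATE INDEX idx_t1 ON TABLE show_idx_t1(key)
      AS "COMPACT" WITH DEFERRED REBUILD;      -- no index data yet
    ALTER INDEX idx_t1 ON show_idx_t1 REBUILD; -- populates default__show_idx_t1_idx_t1__
    SHOW INDEX ON show_idx_t1;                 -- SHOW INDEXES is also accepted
    SHOW FORMATTED INDEXES ON show_idx_t1;     -- FORMATTED adds a header row
    DROP INDEX idx_t1 ON show_idx_t1;
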
http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
deleted file mode 100644
index e0a701b..0000000
--- a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
+++ /dev/null
@@ -1,225 +0,0 @@
-PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string)
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col2
-                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col2 (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82	82
-83	83
-83	83
-83	83
-83	83
-84	84
-84	84
-84	84
-84	84
-85	85
-86	86
-87	87
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string)
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col2
-                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col2 (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82	82
-83	83
-83	83
-83	83
-83	83
-84	84
-84	84
-84	84
-84	84
-85	85
-86	86
-87	87
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src


[9/9] hive git commit: HIVE-18733: Missing break in CommonFastHashTable (Gergely Hajós via Zoltan Haindrich)

Posted by kg...@apache.org.
HIVE-18733: Missing break in CommonFastHashTable (Gergely Hajós via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ea53203f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ea53203f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ea53203f

Branch: refs/heads/master
Commit: ea53203f615d2fedb8abefda861a4a7ec3301bba
Parents: a926179
Author: Gergely Hajós <ro...@gmail.com>
Authored: Tue Feb 27 07:58:46 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Feb 27 07:58:46 2018 +0100

----------------------------------------------------------------------
 .../hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java       | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ea53203f/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java
index 14d885c..131160a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java
@@ -47,6 +47,7 @@ public class CommonFastHashTable {
         break;
       case 2:
         count = 3;
+        break;
       case 3:
         count = 4 + random.nextInt(7);
         break;

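For readers skimming the patch above: this is a classic Java switch fall-through bug. Without the added `break`, execution of `case 2` runs straight into `case 3`, so the `count = 3` assignment is always overwritten and the case-2 value can never actually be used. A minimal standalone sketch of the behaviour follows (the class and variable names here are illustrative; only the two cases visible in the hunk above come from the actual CommonFastHashTable code):

import java.util.Random;

public class FallThroughDemo {
    public static void main(String[] args) {
        Random random = new Random();
        int selector = 2;   // simulate the branch the test helper picks
        int count;
        switch (selector) {
            case 2:
                count = 3;
                // BUG (pre-patch): no 'break' here, so control falls through...
            case 3:
                count = 4 + random.nextInt(7);  // ...and overwrites count
                break;
            default:
                count = 0;
        }
        // Pre-patch this prints a value in [4, 10] instead of the intended 3;
        // with the 'break' restored, as in the commit, it prints 3.
        System.out.println(count);
    }
}

The net effect before the fix was that the selector-2 path silently behaved like the selector-3 path, so any test variation that was supposed to run with count = 3 never did.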

[3/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
deleted file mode 100644
index 1687bd2..0000000
--- a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
+++ /dev/null
@@ -1,1260 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	hdfs://### HDFS PATH ###	2088	[1,2,4,8589934592,1,0]
-0	hdfs://### HDFS PATH ###	2632	[1,2,4,8589934592,1,0]
-0	hdfs://### HDFS PATH ###	968	[1,2,4,8589934592,1,0]
-10	hdfs://### HDFS PATH ###	2846	[1,2,4,8589934592,1,0]
-100	hdfs://### HDFS PATH ###	2156	[1,2,4,8589934592,1,0]
-100	hdfs://### HDFS PATH ###	5374	[1,2,4,8589934592,1,0]
-103	hdfs://### HDFS PATH ###	1484	[1,2,4,8589934592,1,0]
-103	hdfs://### HDFS PATH ###	3614	[1,2,4,8589934592,1,0]
-104	hdfs://### HDFS PATH ###	4114	[1,2,4,8589934592,1,0]
-104	hdfs://### HDFS PATH ###	4628	[1,2,4,8589934592,1,0]
-105	hdfs://### HDFS PATH ###	4196	[1,2,4,8589934592,1,0]
-11	hdfs://### HDFS PATH ###	3170	[1,2,4,8589934592,1,0]
-111	hdfs://### HDFS PATH ###	1186	[1,2,4,8589934592,1,0]
-113	hdfs://### HDFS PATH ###	3638	[1,2,4,8589934592,1,0]
-113	hdfs://### HDFS PATH ###	920	[1,2,4,8589934592,1,0]
-114	hdfs://### HDFS PATH ###	4280	[1,2,4,8589934592,1,0]
-116	hdfs://### HDFS PATH ###	3746	[1,2,4,8589934592,1,0]
-118	hdfs://### HDFS PATH ###	2686	[1,2,4,8589934592,1,0]
-118	hdfs://### HDFS PATH ###	2780	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	2064	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	3332	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	4674	[1,2,4,8589934592,1,0]
-12	hdfs://### HDFS PATH ###	1720	[1,2,4,8589934592,1,0]
-12	hdfs://### HDFS PATH ###	4362	[1,2,4,8589934592,1,0]
-120	hdfs://### HDFS PATH ###	2284	[1,2,4,8589934592,1,0]
-120	hdfs://### HDFS PATH ###	4830	[1,2,4,8589934592,1,0]
-125	hdfs://### HDFS PATH ###	1344	[1,2,4,8589934592,1,0]
-125	hdfs://### HDFS PATH ###	4468	[1,2,4,8589934592,1,0]
-126	hdfs://### HDFS PATH ###	5732	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	208	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	3896	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	988	[1,2,4,8589934592,1,0]
-129	hdfs://### HDFS PATH ###	1094	[1,2,4,8589934592,1,0]
-129	hdfs://### HDFS PATH ###	2040	[1,2,4,8589934592,1,0]
-131	hdfs://### HDFS PATH ###	2296	[1,2,4,8589934592,1,0]
-133	hdfs://### HDFS PATH ###	5164	[1,2,4,8589934592,1,0]
-134	hdfs://### HDFS PATH ###	2698	[1,2,4,8589934592,1,0]
-134	hdfs://### HDFS PATH ###	5294	[1,2,4,8589934592,1,0]
-136	hdfs://### HDFS PATH ###	5080	[1,2,4,8589934592,1,0]
-137	hdfs://### HDFS PATH ###	1650	[1,2,4,8589934592,1,0]
-137	hdfs://### HDFS PATH ###	2552	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	1472	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	1848	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	2734	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	3470	[1,2,4,8589934592,1,0]
-143	hdfs://### HDFS PATH ###	3226	[1,2,4,8589934592,1,0]
-145	hdfs://### HDFS PATH ###	304	[1,2,4,8589934592,1,0]
-146	hdfs://### HDFS PATH ###	232	[1,2,4,8589934592,1,0]
-146	hdfs://### HDFS PATH ###	5430	[1,2,4,8589934592,1,0]
-149	hdfs://### HDFS PATH ###	1058	[1,2,4,8589934592,1,0]
-149	hdfs://### HDFS PATH ###	3422	[1,2,4,8589934592,1,0]
-15	hdfs://### HDFS PATH ###	2770	[1,2,4,8589934592,1,0]
-15	hdfs://### HDFS PATH ###	386	[1,2,4,8589934592,1,0]
-150	hdfs://### HDFS PATH ###	150	[1,2,4,8589934592,1,0]
-152	hdfs://### HDFS PATH ###	280	[1,2,4,8589934592,1,0]
-152	hdfs://### HDFS PATH ###	5648	[1,2,4,8589934592,1,0]
-153	hdfs://### HDFS PATH ###	502	[1,2,4,8589934592,1,0]
-155	hdfs://### HDFS PATH ###	932	[1,2,4,8589934592,1,0]
-156	hdfs://### HDFS PATH ###	2352	[1,2,4,8589934592,1,0]
-157	hdfs://### HDFS PATH ###	1140	[1,2,4,8589934592,1,0]
-158	hdfs://### HDFS PATH ###	2052	[1,2,4,8589934592,1,0]
-160	hdfs://### HDFS PATH ###	3274	[1,2,4,8589934592,1,0]
-162	hdfs://### HDFS PATH ###	754	[1,2,4,8589934592,1,0]
-163	hdfs://### HDFS PATH ###	4650	[1,2,4,8589934592,1,0]
-164	hdfs://### HDFS PATH ###	4408	[1,2,4,8589934592,1,0]
-164	hdfs://### HDFS PATH ###	4492	[1,2,4,8589934592,1,0]
-165	hdfs://### HDFS PATH ###	2236	[1,2,4,8589934592,1,0]
-165	hdfs://### HDFS PATH ###	44	[1,2,4,8589934592,1,0]
-166	hdfs://### HDFS PATH ###	418	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	3686	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	5502	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	874	[1,2,4,8589934592,1,0]
-168	hdfs://### HDFS PATH ###	3180	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	1308	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	2588	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	4854	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	5754	[1,2,4,8589934592,1,0]
-17	hdfs://### HDFS PATH ###	910	[1,2,4,8589934592,1,0]
-170	hdfs://### HDFS PATH ###	1106	[1,2,4,8589934592,1,0]
-172	hdfs://### HDFS PATH ###	2018	[1,2,4,8589934592,1,0]
-172	hdfs://### HDFS PATH ###	5104	[1,2,4,8589934592,1,0]
-174	hdfs://### HDFS PATH ###	598	[1,2,4,8589934592,1,0]
-174	hdfs://### HDFS PATH ###	682	[1,2,4,8589934592,1,0]
-175	hdfs://### HDFS PATH ###	4150	[1,2,4,8589934592,1,0]
-175	hdfs://### HDFS PATH ###	5176	[1,2,4,8589934592,1,0]
-176	hdfs://### HDFS PATH ###	1428	[1,2,4,8589934592,1,0]
-176	hdfs://### HDFS PATH ###	1556	[1,2,4,8589934592,1,0]
-177	hdfs://### HDFS PATH ###	3036	[1,2,4,8589934592,1,0]
-178	hdfs://### HDFS PATH ###	4938	[1,2,4,8589934592,1,0]
-179	hdfs://### HDFS PATH ###	2006	[1,2,4,8589934592,1,0]
-179	hdfs://### HDFS PATH ###	2674	[1,2,4,8589934592,1,0]
-18	hdfs://### HDFS PATH ###	5340	[1,2,4,8589934592,1,0]
-18	hdfs://### HDFS PATH ###	5514	[1,2,4,8589934592,1,0]
-180	hdfs://### HDFS PATH ###	1696	[1,2,4,8589934592,1,0]
-181	hdfs://### HDFS PATH ###	1742	[1,2,4,8589934592,1,0]
-183	hdfs://### HDFS PATH ###	5536	[1,2,4,8589934592,1,0]
-186	hdfs://### HDFS PATH ###	5466	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	1416	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	2492	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	4516	[1,2,4,8589934592,1,0]
-189	hdfs://### HDFS PATH ###	5188	[1,2,4,8589934592,1,0]
-19	hdfs://### HDFS PATH ###	2824	[1,2,4,8589934592,1,0]
-190	hdfs://### HDFS PATH ###	4244	[1,2,4,8589934592,1,0]
-191	hdfs://### HDFS PATH ###	2192	[1,2,4,8589934592,1,0]
-191	hdfs://### HDFS PATH ###	3852	[1,2,4,8589934592,1,0]
-192	hdfs://### HDFS PATH ###	1392	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	126	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	4078	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	514	[1,2,4,8589934592,1,0]
-194	hdfs://### HDFS PATH ###	5684	[1,2,4,8589934592,1,0]
-195	hdfs://### HDFS PATH ###	3286	[1,2,4,8589934592,1,0]
-195	hdfs://### HDFS PATH ###	886	[1,2,4,8589934592,1,0]
-196	hdfs://### HDFS PATH ###	2410	[1,2,4,8589934592,1,0]
-197	hdfs://### HDFS PATH ###	2108	[1,2,4,8589934592,1,0]
-197	hdfs://### HDFS PATH ###	2480	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	2180	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	4806	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	646	[1,2,4,8589934592,1,0]
-2	hdfs://### HDFS PATH ###	4004	[1,2,4,8589934592,1,0]
-20	hdfs://### HDFS PATH ###	1118	[1,2,4,8589934592,1,0]
-200	hdfs://### HDFS PATH ###	4698	[1,2,4,8589934592,1,0]
-200	hdfs://### HDFS PATH ###	5790	[1,2,4,8589934592,1,0]
-201	hdfs://### HDFS PATH ###	4384	[1,2,4,8589934592,1,0]
-202	hdfs://### HDFS PATH ###	3932	[1,2,4,8589934592,1,0]
-203	hdfs://### HDFS PATH ###	4314	[1,2,4,8589934592,1,0]
-203	hdfs://### HDFS PATH ###	944	[1,2,4,8589934592,1,0]
-205	hdfs://### HDFS PATH ###	1046	[1,2,4,8589934592,1,0]
-205	hdfs://### HDFS PATH ###	2272	[1,2,4,8589934592,1,0]
-207	hdfs://### HDFS PATH ###	5022	[1,2,4,8589934592,1,0]
-207	hdfs://### HDFS PATH ###	634	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	1272	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	1948	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	670	[1,2,4,8589934592,1,0]
-209	hdfs://### HDFS PATH ###	3504	[1,2,4,8589934592,1,0]
-209	hdfs://### HDFS PATH ###	374	[1,2,4,8589934592,1,0]
-213	hdfs://### HDFS PATH ###	1508	[1,2,4,8589934592,1,0]
-213	hdfs://### HDFS PATH ###	220	[1,2,4,8589934592,1,0]
-214	hdfs://### HDFS PATH ###	5116	[1,2,4,8589934592,1,0]
-216	hdfs://### HDFS PATH ###	1520	[1,2,4,8589934592,1,0]
-216	hdfs://### HDFS PATH ###	3650	[1,2,4,8589934592,1,0]
-217	hdfs://### HDFS PATH ###	1860	[1,2,4,8589934592,1,0]
-217	hdfs://### HDFS PATH ###	4396	[1,2,4,8589934592,1,0]
-218	hdfs://### HDFS PATH ###	3446	[1,2,4,8589934592,1,0]
-219	hdfs://### HDFS PATH ###	3710	[1,2,4,8589934592,1,0]
-219	hdfs://### HDFS PATH ###	478	[1,2,4,8589934592,1,0]
-221	hdfs://### HDFS PATH ###	1164	[1,2,4,8589934592,1,0]
-221	hdfs://### HDFS PATH ###	1580	[1,2,4,8589934592,1,0]
-222	hdfs://### HDFS PATH ###	5720	[1,2,4,8589934592,1,0]
-223	hdfs://### HDFS PATH ###	3398	[1,2,4,8589934592,1,0]
-223	hdfs://### HDFS PATH ###	3758	[1,2,4,8589934592,1,0]
-224	hdfs://### HDFS PATH ###	174	[1,2,4,8589934592,1,0]
-224	hdfs://### HDFS PATH ###	2892	[1,2,4,8589934592,1,0]
-226	hdfs://### HDFS PATH ###	3048	[1,2,4,8589934592,1,0]
-228	hdfs://### HDFS PATH ###	3458	[1,2,4,8589934592,1,0]
-229	hdfs://### HDFS PATH ###	3202	[1,2,4,8589934592,1,0]
-229	hdfs://### HDFS PATH ###	3956	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	1730	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	1936	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	2260	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	3580	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	4914	[1,2,4,8589934592,1,0]
-233	hdfs://### HDFS PATH ###	3214	[1,2,4,8589934592,1,0]
-233	hdfs://### HDFS PATH ###	5140	[1,2,4,8589934592,1,0]
-235	hdfs://### HDFS PATH ###	4046	[1,2,4,8589934592,1,0]
-237	hdfs://### HDFS PATH ###	4722	[1,2,4,8589934592,1,0]
-237	hdfs://### HDFS PATH ###	574	[1,2,4,8589934592,1,0]
-238	hdfs://### HDFS PATH ###	0	[1,2,4,8589934592,1,0]
-238	hdfs://### HDFS PATH ###	2746	[1,2,4,8589934592,1,0]
-239	hdfs://### HDFS PATH ###	1496	[1,2,4,8589934592,1,0]
-239	hdfs://### HDFS PATH ###	3722	[1,2,4,8589934592,1,0]
-24	hdfs://### HDFS PATH ###	1972	[1,2,4,8589934592,1,0]
-24	hdfs://### HDFS PATH ###	4594	[1,2,4,8589934592,1,0]
-241	hdfs://### HDFS PATH ###	1662	[1,2,4,8589934592,1,0]
-242	hdfs://### HDFS PATH ###	2940	[1,2,4,8589934592,1,0]
-242	hdfs://### HDFS PATH ###	3012	[1,2,4,8589934592,1,0]
-244	hdfs://### HDFS PATH ###	3872	[1,2,4,8589934592,1,0]
-247	hdfs://### HDFS PATH ###	718	[1,2,4,8589934592,1,0]
-248	hdfs://### HDFS PATH ###	4758	[1,2,4,8589934592,1,0]
-249	hdfs://### HDFS PATH ###	5034	[1,2,4,8589934592,1,0]
-252	hdfs://### HDFS PATH ###	454	[1,2,4,8589934592,1,0]
-255	hdfs://### HDFS PATH ###	4616	[1,2,4,8589934592,1,0]
-255	hdfs://### HDFS PATH ###	68	[1,2,4,8589934592,1,0]
-256	hdfs://### HDFS PATH ###	3770	[1,2,4,8589934592,1,0]
-256	hdfs://### HDFS PATH ###	5272	[1,2,4,8589934592,1,0]
-257	hdfs://### HDFS PATH ###	4208	[1,2,4,8589934592,1,0]
-258	hdfs://### HDFS PATH ###	4292	[1,2,4,8589934592,1,0]
-26	hdfs://### HDFS PATH ###	2226	[1,2,4,8589934592,1,0]
-26	hdfs://### HDFS PATH ###	5284	[1,2,4,8589934592,1,0]
-260	hdfs://### HDFS PATH ###	1764	[1,2,4,8589934592,1,0]
-262	hdfs://### HDFS PATH ###	4326	[1,2,4,8589934592,1,0]
-263	hdfs://### HDFS PATH ###	3782	[1,2,4,8589934592,1,0]
-265	hdfs://### HDFS PATH ###	114	[1,2,4,8589934592,1,0]
-265	hdfs://### HDFS PATH ###	5046	[1,2,4,8589934592,1,0]
-266	hdfs://### HDFS PATH ###	814	[1,2,4,8589934592,1,0]
-27	hdfs://### HDFS PATH ###	34	[1,2,4,8589934592,1,0]
-272	hdfs://### HDFS PATH ###	1836	[1,2,4,8589934592,1,0]
-272	hdfs://### HDFS PATH ###	2976	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	162	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	2868	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	5524	[1,2,4,8589934592,1,0]
-274	hdfs://### HDFS PATH ###	3698	[1,2,4,8589934592,1,0]
-275	hdfs://### HDFS PATH ###	1638	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	1260	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	2856	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	362	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	4902	[1,2,4,8589934592,1,0]
-278	hdfs://### HDFS PATH ###	1544	[1,2,4,8589934592,1,0]
-278	hdfs://### HDFS PATH ###	80	[1,2,4,8589934592,1,0]
-28	hdfs://### HDFS PATH ###	5616	[1,2,4,8589934592,1,0]
-280	hdfs://### HDFS PATH ###	1226	[1,2,4,8589934592,1,0]
-280	hdfs://### HDFS PATH ###	3992	[1,2,4,8589934592,1,0]
-281	hdfs://### HDFS PATH ###	350	[1,2,4,8589934592,1,0]
-281	hdfs://### HDFS PATH ###	5548	[1,2,4,8589934592,1,0]
-282	hdfs://### HDFS PATH ###	2468	[1,2,4,8589934592,1,0]
-282	hdfs://### HDFS PATH ###	2722	[1,2,4,8589934592,1,0]
-283	hdfs://### HDFS PATH ###	4022	[1,2,4,8589934592,1,0]
-284	hdfs://### HDFS PATH ###	1708	[1,2,4,8589934592,1,0]
-285	hdfs://### HDFS PATH ###	5478	[1,2,4,8589934592,1,0]
-286	hdfs://### HDFS PATH ###	1404	[1,2,4,8589934592,1,0]
-287	hdfs://### HDFS PATH ###	490	[1,2,4,8589934592,1,0]
-288	hdfs://### HDFS PATH ###	2422	[1,2,4,8589934592,1,0]
-288	hdfs://### HDFS PATH ###	3840	[1,2,4,8589934592,1,0]
-289	hdfs://### HDFS PATH ###	1568	[1,2,4,8589934592,1,0]
-291	hdfs://### HDFS PATH ###	4582	[1,2,4,8589934592,1,0]
-292	hdfs://### HDFS PATH ###	466	[1,2,4,8589934592,1,0]
-296	hdfs://### HDFS PATH ###	3626	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	2168	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	4456	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	5386	[1,2,4,8589934592,1,0]
-30	hdfs://### HDFS PATH ###	3494	[1,2,4,8589934592,1,0]
-302	hdfs://### HDFS PATH ###	1034	[1,2,4,8589934592,1,0]
-305	hdfs://### HDFS PATH ###	4782	[1,2,4,8589934592,1,0]
-306	hdfs://### HDFS PATH ###	2880	[1,2,4,8589934592,1,0]
-307	hdfs://### HDFS PATH ###	2812	[1,2,4,8589934592,1,0]
-307	hdfs://### HDFS PATH ###	5672	[1,2,4,8589934592,1,0]
-308	hdfs://### HDFS PATH ###	2388	[1,2,4,8589934592,1,0]
-309	hdfs://### HDFS PATH ###	2904	[1,2,4,8589934592,1,0]
-309	hdfs://### HDFS PATH ###	790	[1,2,4,8589934592,1,0]
-310	hdfs://### HDFS PATH ###	4962	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	1000	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	1626	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	22	[1,2,4,8589934592,1,0]
-315	hdfs://### HDFS PATH ###	5594	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	1012	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	2576	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	3944	[1,2,4,8589934592,1,0]
-317	hdfs://### HDFS PATH ###	3104	[1,2,4,8589934592,1,0]
-317	hdfs://### HDFS PATH ###	4974	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	1602	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	2504	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	2516	[1,2,4,8589934592,1,0]
-321	hdfs://### HDFS PATH ###	3308	[1,2,4,8589934592,1,0]
-321	hdfs://### HDFS PATH ###	4090	[1,2,4,8589934592,1,0]
-322	hdfs://### HDFS PATH ###	2096	[1,2,4,8589934592,1,0]
-322	hdfs://### HDFS PATH ###	3250	[1,2,4,8589934592,1,0]
-323	hdfs://### HDFS PATH ###	4878	[1,2,4,8589934592,1,0]
-325	hdfs://### HDFS PATH ###	4890	[1,2,4,8589934592,1,0]
-325	hdfs://### HDFS PATH ###	862	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	2248	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	2928	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	338	[1,2,4,8589934592,1,0]
-33	hdfs://### HDFS PATH ###	3592	[1,2,4,8589934592,1,0]
-331	hdfs://### HDFS PATH ###	2988	[1,2,4,8589934592,1,0]
-331	hdfs://### HDFS PATH ###	4034	[1,2,4,8589934592,1,0]
-332	hdfs://### HDFS PATH ###	1614	[1,2,4,8589934592,1,0]
-333	hdfs://### HDFS PATH ###	1684	[1,2,4,8589934592,1,0]
-333	hdfs://### HDFS PATH ###	4986	[1,2,4,8589934592,1,0]
-335	hdfs://### HDFS PATH ###	4102	[1,2,4,8589934592,1,0]
-336	hdfs://### HDFS PATH ###	3148	[1,2,4,8589934592,1,0]
-338	hdfs://### HDFS PATH ###	526	[1,2,4,8589934592,1,0]
-339	hdfs://### HDFS PATH ###	956	[1,2,4,8589934592,1,0]
-34	hdfs://### HDFS PATH ###	3192	[1,2,4,8589934592,1,0]
-341	hdfs://### HDFS PATH ###	5406	[1,2,4,8589934592,1,0]
-342	hdfs://### HDFS PATH ###	3558	[1,2,4,8589934592,1,0]
-342	hdfs://### HDFS PATH ###	838	[1,2,4,8589934592,1,0]
-344	hdfs://### HDFS PATH ###	3674	[1,2,4,8589934592,1,0]
-344	hdfs://### HDFS PATH ###	5560	[1,2,4,8589934592,1,0]
-345	hdfs://### HDFS PATH ###	1082	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	1882	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	1960	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	4338	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	5490	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	5660	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	1238	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	3138	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	4012	[1,2,4,8589934592,1,0]
-351	hdfs://### HDFS PATH ###	4604	[1,2,4,8589934592,1,0]
-353	hdfs://### HDFS PATH ###	1812	[1,2,4,8589934592,1,0]
-353	hdfs://### HDFS PATH ###	5092	[1,2,4,8589934592,1,0]
-356	hdfs://### HDFS PATH ###	1284	[1,2,4,8589934592,1,0]
-360	hdfs://### HDFS PATH ###	4746	[1,2,4,8589934592,1,0]
-362	hdfs://### HDFS PATH ###	5454	[1,2,4,8589934592,1,0]
-364	hdfs://### HDFS PATH ###	2662	[1,2,4,8589934592,1,0]
-365	hdfs://### HDFS PATH ###	802	[1,2,4,8589934592,1,0]
-366	hdfs://### HDFS PATH ###	4138	[1,2,4,8589934592,1,0]
-367	hdfs://### HDFS PATH ###	3662	[1,2,4,8589934592,1,0]
-367	hdfs://### HDFS PATH ###	850	[1,2,4,8589934592,1,0]
-368	hdfs://### HDFS PATH ###	3602	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	186	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	2564	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	2952	[1,2,4,8589934592,1,0]
-37	hdfs://### HDFS PATH ###	328	[1,2,4,8589934592,1,0]
-37	hdfs://### HDFS PATH ###	5626	[1,2,4,8589934592,1,0]
-373	hdfs://### HDFS PATH ###	1824	[1,2,4,8589934592,1,0]
-374	hdfs://### HDFS PATH ###	268	[1,2,4,8589934592,1,0]
-375	hdfs://### HDFS PATH ###	5212	[1,2,4,8589934592,1,0]
-377	hdfs://### HDFS PATH ###	766	[1,2,4,8589934592,1,0]
-378	hdfs://### HDFS PATH ###	1152	[1,2,4,8589934592,1,0]
-379	hdfs://### HDFS PATH ###	5328	[1,2,4,8589934592,1,0]
-382	hdfs://### HDFS PATH ###	1320	[1,2,4,8589934592,1,0]
-382	hdfs://### HDFS PATH ###	4528	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	1788	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	5260	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	5316	[1,2,4,8589934592,1,0]
-386	hdfs://### HDFS PATH ###	1356	[1,2,4,8589934592,1,0]
-389	hdfs://### HDFS PATH ###	2916	[1,2,4,8589934592,1,0]
-392	hdfs://### HDFS PATH ###	2964	[1,2,4,8589934592,1,0]
-393	hdfs://### HDFS PATH ###	2132	[1,2,4,8589934592,1,0]
-394	hdfs://### HDFS PATH ###	562	[1,2,4,8589934592,1,0]
-395	hdfs://### HDFS PATH ###	2710	[1,2,4,8589934592,1,0]
-395	hdfs://### HDFS PATH ###	3116	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	3092	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	4372	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	706	[1,2,4,8589934592,1,0]
-397	hdfs://### HDFS PATH ###	4558	[1,2,4,8589934592,1,0]
-397	hdfs://### HDFS PATH ###	778	[1,2,4,8589934592,1,0]
-399	hdfs://### HDFS PATH ###	1296	[1,2,4,8589934592,1,0]
-399	hdfs://### HDFS PATH ###	694	[1,2,4,8589934592,1,0]
-4	hdfs://### HDFS PATH ###	1218	[1,2,4,8589934592,1,0]
-400	hdfs://### HDFS PATH ###	5778	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	138	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	3000	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	3828	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	4268	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	5224	[1,2,4,8589934592,1,0]
-402	hdfs://### HDFS PATH ###	3080	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	406	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	4162	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	5766	[1,2,4,8589934592,1,0]
-404	hdfs://### HDFS PATH ###	1776	[1,2,4,8589934592,1,0]
-404	hdfs://### HDFS PATH ###	2318	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	244	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	4220	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	4256	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	5152	[1,2,4,8589934592,1,0]
-407	hdfs://### HDFS PATH ###	5248	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	2528	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	4232	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	56	[1,2,4,8589934592,1,0]
-41	hdfs://### HDFS PATH ###	3388	[1,2,4,8589934592,1,0]
-411	hdfs://### HDFS PATH ###	1924	[1,2,4,8589934592,1,0]
-413	hdfs://### HDFS PATH ###	2600	[1,2,4,8589934592,1,0]
-413	hdfs://### HDFS PATH ###	610	[1,2,4,8589934592,1,0]
-414	hdfs://### HDFS PATH ###	4686	[1,2,4,8589934592,1,0]
-414	hdfs://### HDFS PATH ###	5696	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	430	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	4794	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	730	[1,2,4,8589934592,1,0]
-418	hdfs://### HDFS PATH ###	2204	[1,2,4,8589934592,1,0]
-419	hdfs://### HDFS PATH ###	2758	[1,2,4,8589934592,1,0]
-42	hdfs://### HDFS PATH ###	2030	[1,2,4,8589934592,1,0]
-42	hdfs://### HDFS PATH ###	3298	[1,2,4,8589934592,1,0]
-421	hdfs://### HDFS PATH ###	5236	[1,2,4,8589934592,1,0]
-424	hdfs://### HDFS PATH ###	4350	[1,2,4,8589934592,1,0]
-424	hdfs://### HDFS PATH ###	4504	[1,2,4,8589934592,1,0]
-427	hdfs://### HDFS PATH ###	1248	[1,2,4,8589934592,1,0]
-429	hdfs://### HDFS PATH ###	256	[1,2,4,8589934592,1,0]
-429	hdfs://### HDFS PATH ###	4842	[1,2,4,8589934592,1,0]
-43	hdfs://### HDFS PATH ###	2330	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	1532	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	3320	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	442	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	1994	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	4420	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	4480	[1,2,4,8589934592,1,0]
-432	hdfs://### HDFS PATH ###	3920	[1,2,4,8589934592,1,0]
-435	hdfs://### HDFS PATH ###	2834	[1,2,4,8589934592,1,0]
-436	hdfs://### HDFS PATH ###	2340	[1,2,4,8589934592,1,0]
-437	hdfs://### HDFS PATH ###	1368	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	1070	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	3884	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	4662	[1,2,4,8589934592,1,0]
-439	hdfs://### HDFS PATH ###	4734	[1,2,4,8589934592,1,0]
-439	hdfs://### HDFS PATH ###	826	[1,2,4,8589934592,1,0]
-44	hdfs://### HDFS PATH ###	4068	[1,2,4,8589934592,1,0]
-443	hdfs://### HDFS PATH ###	4866	[1,2,4,8589934592,1,0]
-444	hdfs://### HDFS PATH ###	4818	[1,2,4,8589934592,1,0]
-446	hdfs://### HDFS PATH ###	538	[1,2,4,8589934592,1,0]
-448	hdfs://### HDFS PATH ###	5636	[1,2,4,8589934592,1,0]
-449	hdfs://### HDFS PATH ###	3434	[1,2,4,8589934592,1,0]
-452	hdfs://### HDFS PATH ###	3024	[1,2,4,8589934592,1,0]
-453	hdfs://### HDFS PATH ###	3482	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	2144	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	4432	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	5200	[1,2,4,8589934592,1,0]
-455	hdfs://### HDFS PATH ###	976	[1,2,4,8589934592,1,0]
-457	hdfs://### HDFS PATH ###	2446	[1,2,4,8589934592,1,0]
-458	hdfs://### HDFS PATH ###	3356	[1,2,4,8589934592,1,0]
-458	hdfs://### HDFS PATH ###	5442	[1,2,4,8589934592,1,0]
-459	hdfs://### HDFS PATH ###	1450	[1,2,4,8589934592,1,0]
-459	hdfs://### HDFS PATH ###	550	[1,2,4,8589934592,1,0]
-460	hdfs://### HDFS PATH ###	5010	[1,2,4,8589934592,1,0]
-462	hdfs://### HDFS PATH ###	5128	[1,2,4,8589934592,1,0]
-462	hdfs://### HDFS PATH ###	5350	[1,2,4,8589934592,1,0]
-463	hdfs://### HDFS PATH ###	1982	[1,2,4,8589934592,1,0]
-463	hdfs://### HDFS PATH ###	3980	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	1894	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	4126	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	658	[1,2,4,8589934592,1,0]
-467	hdfs://### HDFS PATH ###	3908	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	2120	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	2376	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	3526	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	4950	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	1380	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	2364	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	292	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	3968	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	5582	[1,2,4,8589934592,1,0]
-47	hdfs://### HDFS PATH ###	1198	[1,2,4,8589934592,1,0]
-470	hdfs://### HDFS PATH ###	2540	[1,2,4,8589934592,1,0]
-472	hdfs://### HDFS PATH ###	3238	[1,2,4,8589934592,1,0]
-475	hdfs://### HDFS PATH ###	898	[1,2,4,8589934592,1,0]
-477	hdfs://### HDFS PATH ###	5708	[1,2,4,8589934592,1,0]
-478	hdfs://### HDFS PATH ###	4444	[1,2,4,8589934592,1,0]
-478	hdfs://### HDFS PATH ###	4926	[1,2,4,8589934592,1,0]
-479	hdfs://### HDFS PATH ###	4770	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	3816	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	4570	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	5058	[1,2,4,8589934592,1,0]
-481	hdfs://### HDFS PATH ###	2434	[1,2,4,8589934592,1,0]
-482	hdfs://### HDFS PATH ###	586	[1,2,4,8589934592,1,0]
-483	hdfs://### HDFS PATH ###	4174	[1,2,4,8589934592,1,0]
-484	hdfs://### HDFS PATH ###	102	[1,2,4,8589934592,1,0]
-485	hdfs://### HDFS PATH ###	3734	[1,2,4,8589934592,1,0]
-487	hdfs://### HDFS PATH ###	3804	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	1128	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	1800	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	3344	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	742	[1,2,4,8589934592,1,0]
-490	hdfs://### HDFS PATH ###	2640	[1,2,4,8589934592,1,0]
-491	hdfs://### HDFS PATH ###	4710	[1,2,4,8589934592,1,0]
-492	hdfs://### HDFS PATH ###	3410	[1,2,4,8589934592,1,0]
-492	hdfs://### HDFS PATH ###	5362	[1,2,4,8589934592,1,0]
-493	hdfs://### HDFS PATH ###	4998	[1,2,4,8589934592,1,0]
-494	hdfs://### HDFS PATH ###	622	[1,2,4,8589934592,1,0]
-495	hdfs://### HDFS PATH ###	316	[1,2,4,8589934592,1,0]
-496	hdfs://### HDFS PATH ###	2076	[1,2,4,8589934592,1,0]
-497	hdfs://### HDFS PATH ###	3068	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	1332	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	3262	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	5418	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	3060	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	3864	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	4540	[1,2,4,8589934592,1,0]
-51	hdfs://### HDFS PATH ###	1462	[1,2,4,8589934592,1,0]
-51	hdfs://### HDFS PATH ###	2308	[1,2,4,8589934592,1,0]
-53	hdfs://### HDFS PATH ###	4186	[1,2,4,8589934592,1,0]
-54	hdfs://### HDFS PATH ###	1440	[1,2,4,8589934592,1,0]
-57	hdfs://### HDFS PATH ###	1024	[1,2,4,8589934592,1,0]
-58	hdfs://### HDFS PATH ###	1906	[1,2,4,8589934592,1,0]
-58	hdfs://### HDFS PATH ###	3128	[1,2,4,8589934592,1,0]
-64	hdfs://### HDFS PATH ###	3516	[1,2,4,8589934592,1,0]
-65	hdfs://### HDFS PATH ###	1592	[1,2,4,8589934592,1,0]
-66	hdfs://### HDFS PATH ###	198	[1,2,4,8589934592,1,0]
-67	hdfs://### HDFS PATH ###	1754	[1,2,4,8589934592,1,0]
-67	hdfs://### HDFS PATH ###	5306	[1,2,4,8589934592,1,0]
-69	hdfs://### HDFS PATH ###	3570	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	3794	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	4548	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	4640	[1,2,4,8589934592,1,0]
-72	hdfs://### HDFS PATH ###	1208	[1,2,4,8589934592,1,0]
-72	hdfs://### HDFS PATH ###	2792	[1,2,4,8589934592,1,0]
-74	hdfs://### HDFS PATH ###	3548	[1,2,4,8589934592,1,0]
-76	hdfs://### HDFS PATH ###	3378	[1,2,4,8589934592,1,0]
-76	hdfs://### HDFS PATH ###	3538	[1,2,4,8589934592,1,0]
-77	hdfs://### HDFS PATH ###	2622	[1,2,4,8589934592,1,0]
-78	hdfs://### HDFS PATH ###	3368	[1,2,4,8589934592,1,0]
-8	hdfs://### HDFS PATH ###	1916	[1,2,4,8589934592,1,0]
-80	hdfs://### HDFS PATH ###	4058	[1,2,4,8589934592,1,0]
-82	hdfs://### HDFS PATH ###	396	[1,2,4,8589934592,1,0]
-83	hdfs://### HDFS PATH ###	1674	[1,2,4,8589934592,1,0]
-83	hdfs://### HDFS PATH ###	5070	[1,2,4,8589934592,1,0]
-84	hdfs://### HDFS PATH ###	1872	[1,2,4,8589934592,1,0]
-84	hdfs://### HDFS PATH ###	5606	[1,2,4,8589934592,1,0]
-85	hdfs://### HDFS PATH ###	2612	[1,2,4,8589934592,1,0]
-86	hdfs://### HDFS PATH ###	12	[1,2,4,8589934592,1,0]
-87	hdfs://### HDFS PATH ###	2652	[1,2,4,8589934592,1,0]
-9	hdfs://### HDFS PATH ###	5398	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	2802	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	4304	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	5744	[1,2,4,8589934592,1,0]
-92	hdfs://### HDFS PATH ###	1176	[1,2,4,8589934592,1,0]
-95	hdfs://### HDFS PATH ###	2400	[1,2,4,8589934592,1,0]
-95	hdfs://### HDFS PATH ###	3160	[1,2,4,8589934592,1,0]
-96	hdfs://### HDFS PATH ###	2216	[1,2,4,8589934592,1,0]
-97	hdfs://### HDFS PATH ###	5572	[1,2,4,8589934592,1,0]
-97	hdfs://### HDFS PATH ###	5802	[1,2,4,8589934592,1,0]
-98	hdfs://### HDFS PATH ###	2458	[1,2,4,8589934592,1,0]
-98	hdfs://### HDFS PATH ###	92	[1,2,4,8589934592,1,0]
-PREHOOK: query: SELECT * FROM default__src_src2_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src2_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src2_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src2_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-val_0	hdfs://### HDFS PATH ###	2088	[1,2,4,8589934592,1,0]
-val_0	hdfs://### HDFS PATH ###	2632	[1,2,4,8589934592,1,0]
-val_0	hdfs://### HDFS PATH ###	968	[1,2,4,8589934592,1,0]
-val_10	hdfs://### HDFS PATH ###	2846	[1,2,4,8589934592,1,0]
-val_100	hdfs://### HDFS PATH ###	2156	[1,2,4,8589934592,1,0]
-val_100	hdfs://### HDFS PATH ###	5374	[1,2,4,8589934592,1,0]
-val_103	hdfs://### HDFS PATH ###	1484	[1,2,4,8589934592,1,0]
-val_103	hdfs://### HDFS PATH ###	3614	[1,2,4,8589934592,1,0]
-val_104	hdfs://### HDFS PATH ###	4114	[1,2,4,8589934592,1,0]
-val_104	hdfs://### HDFS PATH ###	4628	[1,2,4,8589934592,1,0]
-val_105	hdfs://### HDFS PATH ###	4196	[1,2,4,8589934592,1,0]
-val_11	hdfs://### HDFS PATH ###	3170	[1,2,4,8589934592,1,0]
-val_111	hdfs://### HDFS PATH ###	1186	[1,2,4,8589934592,1,0]
-val_113	hdfs://### HDFS PATH ###	3638	[1,2,4,8589934592,1,0]
-val_113	hdfs://### HDFS PATH ###	920	[1,2,4,8589934592,1,0]
-val_114	hdfs://### HDFS PATH ###	4280	[1,2,4,8589934592,1,0]
-val_116	hdfs://### HDFS PATH ###	3746	[1,2,4,8589934592,1,0]
-val_118	hdfs://### HDFS PATH ###	2686	[1,2,4,8589934592,1,0]
-val_118	hdfs://### HDFS PATH ###	2780	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	2064	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	3332	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	4674	[1,2,4,8589934592,1,0]
-val_12	hdfs://### HDFS PATH ###	1720	[1,2,4,8589934592,1,0]
-val_12	hdfs://### HDFS PATH ###	4362	[1,2,4,8589934592,1,0]
-val_120	hdfs://### HDFS PATH ###	2284	[1,2,4,8589934592,1,0]
-val_120	hdfs://### HDFS PATH ###	4830	[1,2,4,8589934592,1,0]
-val_125	hdfs://### HDFS PATH ###	1344	[1,2,4,8589934592,1,0]
-val_125	hdfs://### HDFS PATH ###	4468	[1,2,4,8589934592,1,0]
-val_126	hdfs://### HDFS PATH ###	5732	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	208	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	3896	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	988	[1,2,4,8589934592,1,0]
-val_129	hdfs://### HDFS PATH ###	1094	[1,2,4,8589934592,1,0]
-val_129	hdfs://### HDFS PATH ###	2040	[1,2,4,8589934592,1,0]
-val_131	hdfs://### HDFS PATH ###	2296	[1,2,4,8589934592,1,0]
-val_133	hdfs://### HDFS PATH ###	5164	[1,2,4,8589934592,1,0]
-val_134	hdfs://### HDFS PATH ###	2698	[1,2,4,8589934592,1,0]
-val_134	hdfs://### HDFS PATH ###	5294	[1,2,4,8589934592,1,0]
-val_136	hdfs://### HDFS PATH ###	5080	[1,2,4,8589934592,1,0]
-val_137	hdfs://### HDFS PATH ###	1650	[1,2,4,8589934592,1,0]
-val_137	hdfs://### HDFS PATH ###	2552	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	1472	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	1848	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	2734	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	3470	[1,2,4,8589934592,1,0]
-val_143	hdfs://### HDFS PATH ###	3226	[1,2,4,8589934592,1,0]
-val_145	hdfs://### HDFS PATH ###	304	[1,2,4,8589934592,1,0]
-val_146	hdfs://### HDFS PATH ###	232	[1,2,4,8589934592,1,0]
-val_146	hdfs://### HDFS PATH ###	5430	[1,2,4,8589934592,1,0]
-val_149	hdfs://### HDFS PATH ###	1058	[1,2,4,8589934592,1,0]
-val_149	hdfs://### HDFS PATH ###	3422	[1,2,4,8589934592,1,0]
-val_15	hdfs://### HDFS PATH ###	2770	[1,2,4,8589934592,1,0]
-val_15	hdfs://### HDFS PATH ###	386	[1,2,4,8589934592,1,0]
-val_150	hdfs://### HDFS PATH ###	150	[1,2,4,8589934592,1,0]
-val_152	hdfs://### HDFS PATH ###	280	[1,2,4,8589934592,1,0]
-val_152	hdfs://### HDFS PATH ###	5648	[1,2,4,8589934592,1,0]
-val_153	hdfs://### HDFS PATH ###	502	[1,2,4,8589934592,1,0]
-val_155	hdfs://### HDFS PATH ###	932	[1,2,4,8589934592,1,0]
-val_156	hdfs://### HDFS PATH ###	2352	[1,2,4,8589934592,1,0]
-val_157	hdfs://### HDFS PATH ###	1140	[1,2,4,8589934592,1,0]
-val_158	hdfs://### HDFS PATH ###	2052	[1,2,4,8589934592,1,0]
-val_160	hdfs://### HDFS PATH ###	3274	[1,2,4,8589934592,1,0]
-val_162	hdfs://### HDFS PATH ###	754	[1,2,4,8589934592,1,0]
-val_163	hdfs://### HDFS PATH ###	4650	[1,2,4,8589934592,1,0]
-val_164	hdfs://### HDFS PATH ###	4408	[1,2,4,8589934592,1,0]
-val_164	hdfs://### HDFS PATH ###	4492	[1,2,4,8589934592,1,0]
-val_165	hdfs://### HDFS PATH ###	2236	[1,2,4,8589934592,1,0]
-val_165	hdfs://### HDFS PATH ###	44	[1,2,4,8589934592,1,0]
-val_166	hdfs://### HDFS PATH ###	418	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	3686	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	5502	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	874	[1,2,4,8589934592,1,0]
-val_168	hdfs://### HDFS PATH ###	3180	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	1308	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	2588	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	4854	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	5754	[1,2,4,8589934592,1,0]
-val_17	hdfs://### HDFS PATH ###	910	[1,2,4,8589934592,1,0]
-val_170	hdfs://### HDFS PATH ###	1106	[1,2,4,8589934592,1,0]
-val_172	hdfs://### HDFS PATH ###	2018	[1,2,4,8589934592,1,0]
-val_172	hdfs://### HDFS PATH ###	5104	[1,2,4,8589934592,1,0]
-val_174	hdfs://### HDFS PATH ###	598	[1,2,4,8589934592,1,0]
-val_174	hdfs://### HDFS PATH ###	682	[1,2,4,8589934592,1,0]
-val_175	hdfs://### HDFS PATH ###	4150	[1,2,4,8589934592,1,0]
-val_175	hdfs://### HDFS PATH ###	5176	[1,2,4,8589934592,1,0]
-val_176	hdfs://### HDFS PATH ###	1428	[1,2,4,8589934592,1,0]
-val_176	hdfs://### HDFS PATH ###	1556	[1,2,4,8589934592,1,0]
-val_177	hdfs://### HDFS PATH ###	3036	[1,2,4,8589934592,1,0]
-val_178	hdfs://### HDFS PATH ###	4938	[1,2,4,8589934592,1,0]
-val_179	hdfs://### HDFS PATH ###	2006	[1,2,4,8589934592,1,0]
-val_179	hdfs://### HDFS PATH ###	2674	[1,2,4,8589934592,1,0]
-val_18	hdfs://### HDFS PATH ###	5340	[1,2,4,8589934592,1,0]
-val_18	hdfs://### HDFS PATH ###	5514	[1,2,4,8589934592,1,0]
-val_180	hdfs://### HDFS PATH ###	1696	[1,2,4,8589934592,1,0]
-val_181	hdfs://### HDFS PATH ###	1742	[1,2,4,8589934592,1,0]
-val_183	hdfs://### HDFS PATH ###	5536	[1,2,4,8589934592,1,0]
-val_186	hdfs://### HDFS PATH ###	5466	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	1416	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	2492	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	4516	[1,2,4,8589934592,1,0]
-val_189	hdfs://### HDFS PATH ###	5188	[1,2,4,8589934592,1,0]
-val_19	hdfs://### HDFS PATH ###	2824	[1,2,4,8589934592,1,0]
-val_190	hdfs://### HDFS PATH ###	4244	[1,2,4,8589934592,1,0]
-val_191	hdfs://### HDFS PATH ###	2192	[1,2,4,8589934592,1,0]
-val_191	hdfs://### HDFS PATH ###	3852	[1,2,4,8589934592,1,0]
-val_192	hdfs://### HDFS PATH ###	1392	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	126	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	4078	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	514	[1,2,4,8589934592,1,0]
-val_194	hdfs://### HDFS PATH ###	5684	[1,2,4,8589934592,1,0]
-val_195	hdfs://### HDFS PATH ###	3286	[1,2,4,8589934592,1,0]
-val_195	hdfs://### HDFS PATH ###	886	[1,2,4,8589934592,1,0]
-val_196	hdfs://### HDFS PATH ###	2410	[1,2,4,8589934592,1,0]
-val_197	hdfs://### HDFS PATH ###	2108	[1,2,4,8589934592,1,0]
-val_197	hdfs://### HDFS PATH ###	2480	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	2180	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	4806	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	646	[1,2,4,8589934592,1,0]
-val_2	hdfs://### HDFS PATH ###	4004	[1,2,4,8589934592,1,0]
-val_20	hdfs://### HDFS PATH ###	1118	[1,2,4,8589934592,1,0]
-val_200	hdfs://### HDFS PATH ###	4698	[1,2,4,8589934592,1,0]
-val_200	hdfs://### HDFS PATH ###	5790	[1,2,4,8589934592,1,0]
-val_201	hdfs://### HDFS PATH ###	4384	[1,2,4,8589934592,1,0]
-val_202	hdfs://### HDFS PATH ###	3932	[1,2,4,8589934592,1,0]
-val_203	hdfs://### HDFS PATH ###	4314	[1,2,4,8589934592,1,0]
-val_203	hdfs://### HDFS PATH ###	944	[1,2,4,8589934592,1,0]
-val_205	hdfs://### HDFS PATH ###	1046	[1,2,4,8589934592,1,0]
-val_205	hdfs://### HDFS PATH ###	2272	[1,2,4,8589934592,1,0]
-val_207	hdfs://### HDFS PATH ###	5022	[1,2,4,8589934592,1,0]
-val_207	hdfs://### HDFS PATH ###	634	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	1272	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	1948	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	670	[1,2,4,8589934592,1,0]
-val_209	hdfs://### HDFS PATH ###	3504	[1,2,4,8589934592,1,0]
-val_209	hdfs://### HDFS PATH ###	374	[1,2,4,8589934592,1,0]
-val_213	hdfs://### HDFS PATH ###	1508	[1,2,4,8589934592,1,0]
-val_213	hdfs://### HDFS PATH ###	220	[1,2,4,8589934592,1,0]
-val_214	hdfs://### HDFS PATH ###	5116	[1,2,4,8589934592,1,0]
-val_216	hdfs://### HDFS PATH ###	1520	[1,2,4,8589934592,1,0]
-val_216	hdfs://### HDFS PATH ###	3650	[1,2,4,8589934592,1,0]
-val_217	hdfs://### HDFS PATH ###	1860	[1,2,4,8589934592,1,0]
-val_217	hdfs://### HDFS PATH ###	4396	[1,2,4,8589934592,1,0]
-val_218	hdfs://### HDFS PATH ###	3446	[1,2,4,8589934592,1,0]
-val_219	hdfs://### HDFS PATH ###	3710	[1,2,4,8589934592,1,0]
-val_219	hdfs://### HDFS PATH ###	478	[1,2,4,8589934592,1,0]
-val_221	hdfs://### HDFS PATH ###	1164	[1,2,4,8589934592,1,0]
-val_221	hdfs://### HDFS PATH ###	1580	[1,2,4,8589934592,1,0]
-val_222	hdfs://### HDFS PATH ###	5720	[1,2,4,8589934592,1,0]
-val_223	hdfs://### HDFS PATH ###	3398	[1,2,4,8589934592,1,0]
-val_223	hdfs://### HDFS PATH ###	3758	[1,2,4,8589934592,1,0]
-val_224	hdfs://### HDFS PATH ###	174	[1,2,4,8589934592,1,0]
-val_224	hdfs://### HDFS PATH ###	2892	[1,2,4,8589934592,1,0]
-val_226	hdfs://### HDFS PATH ###	3048	[1,2,4,8589934592,1,0]
-val_228	hdfs://### HDFS PATH ###	3458	[1,2,4,8589934592,1,0]
-val_229	hdfs://### HDFS PATH ###	3202	[1,2,4,8589934592,1,0]
-val_229	hdfs://### HDFS PATH ###	3956	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	1730	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	1936	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	2260	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	3580	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	4914	[1,2,4,8589934592,1,0]
-val_233	hdfs://### HDFS PATH ###	3214	[1,2,4,8589934592,1,0]
-val_233	hdfs://### HDFS PATH ###	5140	[1,2,4,8589934592,1,0]
-val_235	hdfs://### HDFS PATH ###	4046	[1,2,4,8589934592,1,0]
-val_237	hdfs://### HDFS PATH ###	4722	[1,2,4,8589934592,1,0]
-val_237	hdfs://### HDFS PATH ###	574	[1,2,4,8589934592,1,0]
-val_238	hdfs://### HDFS PATH ###	0	[1,2,4,8589934592,1,0]
-val_238	hdfs://### HDFS PATH ###	2746	[1,2,4,8589934592,1,0]
-val_239	hdfs://### HDFS PATH ###	1496	[1,2,4,8589934592,1,0]
-val_239	hdfs://### HDFS PATH ###	3722	[1,2,4,8589934592,1,0]
-val_24	hdfs://### HDFS PATH ###	1972	[1,2,4,8589934592,1,0]
-val_24	hdfs://### HDFS PATH ###	4594	[1,2,4,8589934592,1,0]
-val_241	hdfs://### HDFS PATH ###	1662	[1,2,4,8589934592,1,0]
-val_242	hdfs://### HDFS PATH ###	2940	[1,2,4,8589934592,1,0]
-val_242	hdfs://### HDFS PATH ###	3012	[1,2,4,8589934592,1,0]
-val_244	hdfs://### HDFS PATH ###	3872	[1,2,4,8589934592,1,0]
-val_247	hdfs://### HDFS PATH ###	718	[1,2,4,8589934592,1,0]
-val_248	hdfs://### HDFS PATH ###	4758	[1,2,4,8589934592,1,0]
-val_249	hdfs://### HDFS PATH ###	5034	[1,2,4,8589934592,1,0]
-val_252	hdfs://### HDFS PATH ###	454	[1,2,4,8589934592,1,0]
-val_255	hdfs://### HDFS PATH ###	4616	[1,2,4,8589934592,1,0]
-val_255	hdfs://### HDFS PATH ###	68	[1,2,4,8589934592,1,0]
-val_256	hdfs://### HDFS PATH ###	3770	[1,2,4,8589934592,1,0]
-val_256	hdfs://### HDFS PATH ###	5272	[1,2,4,8589934592,1,0]
-val_257	hdfs://### HDFS PATH ###	4208	[1,2,4,8589934592,1,0]
-val_258	hdfs://### HDFS PATH ###	4292	[1,2,4,8589934592,1,0]
-val_26	hdfs://### HDFS PATH ###	2226	[1,2,4,8589934592,1,0]
-val_26	hdfs://### HDFS PATH ###	5284	[1,2,4,8589934592,1,0]
-val_260	hdfs://### HDFS PATH ###	1764	[1,2,4,8589934592,1,0]
-val_262	hdfs://### HDFS PATH ###	4326	[1,2,4,8589934592,1,0]
-val_263	hdfs://### HDFS PATH ###	3782	[1,2,4,8589934592,1,0]
-val_265	hdfs://### HDFS PATH ###	114	[1,2,4,8589934592,1,0]
-val_265	hdfs://### HDFS PATH ###	5046	[1,2,4,8589934592,1,0]
-val_266	hdfs://### HDFS PATH ###	814	[1,2,4,8589934592,1,0]
-val_27	hdfs://### HDFS PATH ###	34	[1,2,4,8589934592,1,0]
-val_272	hdfs://### HDFS PATH ###	1836	[1,2,4,8589934592,1,0]
-val_272	hdfs://### HDFS PATH ###	2976	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	162	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	2868	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	5524	[1,2,4,8589934592,1,0]
-val_274	hdfs://### HDFS PATH ###	3698	[1,2,4,8589934592,1,0]
-val_275	hdfs://### HDFS PATH ###	1638	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	1260	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	2856	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	362	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	4902	[1,2,4,8589934592,1,0]
-val_278	hdfs://### HDFS PATH ###	1544	[1,2,4,8589934592,1,0]
-val_278	hdfs://### HDFS PATH ###	80	[1,2,4,8589934592,1,0]
-val_28	hdfs://### HDFS PATH ###	5616	[1,2,4,8589934592,1,0]
-val_280	hdfs://### HDFS PATH ###	1226	[1,2,4,8589934592,1,0]
-val_280	hdfs://### HDFS PATH ###	3992	[1,2,4,8589934592,1,0]
-val_281	hdfs://### HDFS PATH ###	350	[1,2,4,8589934592,1,0]
-val_281	hdfs://### HDFS PATH ###	5548	[1,2,4,8589934592,1,0]
-val_282	hdfs://### HDFS PATH ###	2468	[1,2,4,8589934592,1,0]
-val_282	hdfs://### HDFS PATH ###	2722	[1,2,4,8589934592,1,0]
-val_283	hdfs://### HDFS PATH ###	4022	[1,2,4,8589934592,1,0]
-val_284	hdfs://### HDFS PATH ###	1708	[1,2,4,8589934592,1,0]
-val_285	hdfs://### HDFS PATH ###	5478	[1,2,4,8589934592,1,0]
-val_286	hdfs://### HDFS PATH ###	1404	[1,2,4,8589934592,1,0]
-val_287	hdfs://### HDFS PATH ###	490	[1,2,4,8589934592,1,0]
-val_288	hdfs://### HDFS PATH ###	2422	[1,2,4,8589934592,1,0]
-val_288	hdfs://### HDFS PATH ###	3840	[1,2,4,8589934592,1,0]
-val_289	hdfs://### HDFS PATH ###	1568	[1,2,4,8589934592,1,0]
-val_291	hdfs://### HDFS PATH ###	4582	[1,2,4,8589934592,1,0]
-val_292	hdfs://### HDFS PATH ###	466	[1,2,4,8589934592,1,0]
-val_296	hdfs://### HDFS PATH ###	3626	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	2168	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	4456	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	5386	[1,2,4,8589934592,1,0]
-val_30	hdfs://### HDFS PATH ###	3494	[1,2,4,8589934592,1,0]
-val_302	hdfs://### HDFS PATH ###	1034	[1,2,4,8589934592,1,0]
-val_305	hdfs://### HDFS PATH ###	4782	[1,2,4,8589934592,1,0]
-val_306	hdfs://### HDFS PATH ###	2880	[1,2,4,8589934592,1,0]
-val_307	hdfs://### HDFS PATH ###	2812	[1,2,4,8589934592,1,0]
-val_307	hdfs://### HDFS PATH ###	5672	[1,2,4,8589934592,1,0]
-val_308	hdfs://### HDFS PATH ###	2388	[1,2,4,8589934592,1,0]
-val_309	hdfs://### HDFS PATH ###	2904	[1,2,4,8589934592,1,0]
-val_309	hdfs://### HDFS PATH ###	790	[1,2,4,8589934592,1,0]
-val_310	hdfs://### HDFS PATH ###	4962	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	1000	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	1626	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	22	[1,2,4,8589934592,1,0]
-val_315	hdfs://### HDFS PATH ###	5594	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	1012	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	2576	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	3944	[1,2,4,8589934592,1,0]
-val_317	hdfs://### HDFS PATH ###	3104	[1,2,4,8589934592,1,0]
-val_317	hdfs://### HDFS PATH ###	4974	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	1602	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	2504	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	2516	[1,2,4,8589934592,1,0]
-val_321	hdfs://### HDFS PATH ###	3308	[1,2,4,8589934592,1,0]
-val_321	hdfs://### HDFS PATH ###	4090	[1,2,4,8589934592,1,0]
-val_322	hdfs://### HDFS PATH ###	2096	[1,2,4,8589934592,1,0]
-val_322	hdfs://### HDFS PATH ###	3250	[1,2,4,8589934592,1,0]
-val_323	hdfs://### HDFS PATH ###	4878	[1,2,4,8589934592,1,0]
-val_325	hdfs://### HDFS PATH ###	4890	[1,2,4,8589934592,1,0]
-val_325	hdfs://### HDFS PATH ###	862	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	2248	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	2928	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	338	[1,2,4,8589934592,1,0]
-val_33	hdfs://### HDFS PATH ###	3592	[1,2,4,8589934592,1,0]
-val_331	hdfs://### HDFS PATH ###	2988	[1,2,4,8589934592,1,0]
-val_331	hdfs://### HDFS PATH ###	4034	[1,2,4,8589934592,1,0]
-val_332	hdfs://### HDFS PATH ###	1614	[1,2,4,8589934592,1,0]
-val_333	hdfs://### HDFS PATH ###	1684	[1,2,4,8589934592,1,0]
-val_333	hdfs://### HDFS PATH ###	4986	[1,2,4,8589934592,1,0]
-val_335	hdfs://### HDFS PATH ###	4102	[1,2,4,8589934592,1,0]
-val_336	hdfs://### HDFS PATH ###	3148	[1,2,4,8589934592,1,0]
-val_338	hdfs://### HDFS PATH ###	526	[1,2,4,8589934592,1,0]
-val_339	hdfs://### HDFS PATH ###	956	[1,2,4,8589934592,1,0]
-val_34	hdfs://### HDFS PATH ###	3192	[1,2,4,8589934592,1,0]
-val_341	hdfs://### HDFS PATH ###	5406	[1,2,4,8589934592,1,0]
-val_342	hdfs://### HDFS PATH ###	3558	[1,2,4,8589934592,1,0]
-val_342	hdfs://### HDFS PATH ###	838	[1,2,4,8589934592,1,0]
-val_344	hdfs://### HDFS PATH ###	3674	[1,2,4,8589934592,1,0]
-val_344	hdfs://### HDFS PATH ###	5560	[1,2,4,8589934592,1,0]
-val_345	hdfs://### HDFS PATH ###	1082	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	1882	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	1960	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	4338	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	5490	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	5660	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	1238	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	3138	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	4012	[1,2,4,8589934592,1,0]
-val_351	hdfs://### HDFS PATH ###	4604	[1,2,4,8589934592,1,0]
-val_353	hdfs://### HDFS PATH ###	1812	[1,2,4,8589934592,1,0]
-val_353	hdfs://### HDFS PATH ###	5092	[1,2,4,8589934592,1,0]
-val_356	hdfs://### HDFS PATH ###	1284	[1,2,4,8589934592,1,0]
-val_360	hdfs://### HDFS PATH ###	4746	[1,2,4,8589934592,1,0]
-val_362	hdfs://### HDFS PATH ###	5454	[1,2,4,8589934592,1,0]
-val_364	hdfs://### HDFS PATH ###	2662	[1,2,4,8589934592,1,0]
-val_365	hdfs://### HDFS PATH ###	802	[1,2,4,8589934592,1,0]
-val_366	hdfs://### HDFS PATH ###	4138	[1,2,4,8589934592,1,0]
-val_367	hdfs://### HDFS PATH ###	3662	[1,2,4,8589934592,1,0]
-val_367	hdfs://### HDFS PATH ###	850	[1,2,4,8589934592,1,0]
-val_368	hdfs://### HDFS PATH ###	3602	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	186	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	2564	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	2952	[1,2,4,8589934592,1,0]
-val_37	hdfs://### HDFS PATH ###	328	[1,2,4,8589934592,1,0]
-val_37	hdfs://### HDFS PATH ###	5626	[1,2,4,8589934592,1,0]
-val_373	hdfs://### HDFS PATH ###	1824	[1,2,4,8589934592,1,0]
-val_374	hdfs://### HDFS PATH ###	268	[1,2,4,8589934592,1,0]
-val_375	hdfs://### HDFS PATH ###	5212	[1,2,4,8589934592,1,0]
-val_377	hdfs://### HDFS PATH ###	766	[1,2,4,8589934592,1,0]
-val_378	hdfs://### HDFS PATH ###	1152	[1,2,4,8589934592,1,0]
-val_379	hdfs://### HDFS PATH ###	5328	[1,2,4,8589934592,1,0]
-val_382	hdfs://### HDFS PATH ###	1320	[1,2,4,8589934592,1,0]
-val_382	hdfs://### HDFS PATH ###	4528	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	1788	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	5260	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	5316	[1,2,4,8589934592,1,0]
-val_386	hdfs://### HDFS PATH ###	1356	[1,2,4,8589934592,1,0]
-val_389	hdfs://### HDFS PATH ###	2916	[1,2,4,8589934592,1,0]
-val_392	hdfs://### HDFS PATH ###	2964	[1,2,4,8589934592,1,0]
-val_393	hdfs://### HDFS PATH ###	2132	[1,2,4,8589934592,1,0]
-val_394	hdfs://### HDFS PATH ###	562	[1,2,4,8589934592,1,0]
-val_395	hdfs://### HDFS PATH ###	2710	[1,2,4,8589934592,1,0]
-val_395	hdfs://### HDFS PATH ###	3116	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	3092	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	4372	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	706	[1,2,4,8589934592,1,0]
-val_397	hdfs://### HDFS PATH ###	4558	[1,2,4,8589934592,1,0]
-val_397	hdfs://### HDFS PATH ###	778	[1,2,4,8589934592,1,0]
-val_399	hdfs://### HDFS PATH ###	1296	[1,2,4,8589934592,1,0]
-val_399	hdfs://### HDFS PATH ###	694	[1,2,4,8589934592,1,0]
-val_4	hdfs://### HDFS PATH ###	1218	[1,2,4,8589934592,1,0]
-val_400	hdfs://### HDFS PATH ###	5778	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	138	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	3000	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	3828	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	4268	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	5224	[1,2,4,8589934592,1,0]
-val_402	hdfs://### HDFS PATH ###	3080	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	406	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	4162	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	5766	[1,2,4,8589934592,1,0]
-val_404	hdfs://### HDFS PATH ###	1776	[1,2,4,8589934592,1,0]
-val_404	hdfs://### HDFS PATH ###	2318	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	244	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	4220	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	4256	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	5152	[1,2,4,8589934592,1,0]
-val_407	hdfs://### HDFS PATH ###	5248	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	2528	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	4232	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	56	[1,2,4,8589934592,1,0]
-val_41	hdfs://### HDFS PATH ###	3388	[1,2,4,8589934592,1,0]
-val_411	hdfs://### HDFS PATH ###	1924	[1,2,4,8589934592,1,0]
-val_413	hdfs://### HDFS PATH ###	2600	[1,2,4,8589934592,1,0]
-val_413	hdfs://### HDFS PATH ###	610	[1,2,4,8589934592,1,0]
-val_414	hdfs://### HDFS PATH ###	4686	[1,2,4,8589934592,1,0]
-val_414	hdfs://### HDFS PATH ###	5696	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	430	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	4794	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	730	[1,2,4,8589934592,1,0]
-val_418	hdfs://### HDFS PATH ###	2204	[1,2,4,8589934592,1,0]
-val_419	hdfs://### HDFS PATH ###	2758	[1,2,4,8589934592,1,0]
-val_42	hdfs://### HDFS PATH ###	2030	[1,2,4,8589934592,1,0]
-val_42	hdfs://### HDFS PATH ###	3298	[1,2,4,8589934592,1,0]
-val_421	hdfs://### HDFS PATH ###	5236	[1,2,4,8589934592,1,0]
-val_424	hdfs://### HDFS PATH ###	4350	[1,2,4,8589934592,1,0]
-val_424	hdfs://### HDFS PATH ###	4504	[1,2,4,8589934592,1,0]
-val_427	hdfs://### HDFS PATH ###	1248	[1,2,4,8589934592,1,0]
-val_429	hdfs://### HDFS PATH ###	256	[1,2,4,8589934592,1,0]
-val_429	hdfs://### HDFS PATH ###	4842	[1,2,4,8589934592,1,0]
-val_43	hdfs://### HDFS PATH ###	2330	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	1532	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	3320	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	442	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	1994	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	4420	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	4480	[1,2,4,8589934592,1,0]
-val_432	hdfs://### HDFS PATH ###	3920	[1,2,4,8589934592,1,0]
-val_435	hdfs://### HDFS PATH ###	2834	[1,2,4,8589934592,1,0]
-val_436	hdfs://### HDFS PATH ###	2340	[1,2,4,8589934592,1,0]
-val_437	hdfs://### HDFS PATH ###	1368	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	1070	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	3884	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	4662	[1,2,4,8589934592,1,0]
-val_439	hdfs://### HDFS PATH ###	4734	[1,2,4,8589934592,1,0]
-val_439	hdfs://### HDFS PATH ###	826	[1,2,4,8589934592,1,0]
-val_44	hdfs://### HDFS PATH ###	4068	[1,2,4,8589934592,1,0]
-val_443	hdfs://### HDFS PATH ###	4866	[1,2,4,8589934592,1,0]
-val_444	hdfs://### HDFS PATH ###	4818	[1,2,4,8589934592,1,0]
-val_446	hdfs://### HDFS PATH ###	538	[1,2,4,8589934592,1,0]
-val_448	hdfs://### HDFS PATH ###	5636	[1,2,4,8589934592,1,0]
-val_449	hdfs://### HDFS PATH ###	3434	[1,2,4,8589934592,1,0]
-val_452	hdfs://### HDFS PATH ###	3024	[1,2,4,8589934592,1,0]
-val_453	hdfs://### HDFS PATH ###	3482	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	2144	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	4432	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	5200	[1,2,4,8589934592,1,0]
-val_455	hdfs://### HDFS PATH ###	976	[1,2,4,8589934592,1,0]
-val_457	hdfs://### HDFS PATH ###	2446	[1,2,4,8589934592,1,0]
-val_458	hdfs://### HDFS PATH ###	3356	[1,2,4,8589934592,1,0]
-val_458	hdfs://### HDFS PATH ###	5442	[1,2,4,8589934592,1,0]
-val_459	hdfs://### HDFS PATH ###	1450	[1,2,4,8589934592,1,0]
-val_459	hdfs://### HDFS PATH ###	550	[1,2,4,8589934592,1,0]
-val_460	hdfs://### HDFS PATH ###	5010	[1,2,4,8589934592,1,0]
-val_462	hdfs://### HDFS PATH ###	5128	[1,2,4,8589934592,1,0]
-val_462	hdfs://### HDFS PATH ###	5350	[1,2,4,8589934592,1,0]
-val_463	hdfs://### HDFS PATH ###	1982	[1,2,4,8589934592,1,0]
-val_463	hdfs://### HDFS PATH ###	3980	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	1894	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	4126	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	658	[1,2,4,8589934592,1,0]
-val_467	hdfs://### HDFS PATH ###	3908	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	2120	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	2376	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	3526	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	4950	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	1380	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	2364	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	292	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	3968	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	5582	[1,2,4,8589934592,1,0]
-val_47	hdfs://### HDFS PATH ###	1198	[1,2,4,8589934592,1,0]
-val_470	hdfs://### HDFS PATH ###	2540	[1,2,4,8589934592,1,0]
-val_472	hdfs://### HDFS PATH ###	3238	[1,2,4,8589934592,1,0]
-val_475	hdfs://### HDFS PATH ###	898	[1,2,4,8589934592,1,0]
-val_477	hdfs://### HDFS PATH ###	5708	[1,2,4,8589934592,1,0]
-val_478	hdfs://### HDFS PATH ###	4444	[1,2,4,8589934592,1,0]
-val_478	hdfs://### HDFS PATH ###	4926	[1,2,4,8589934592,1,0]
-val_479	hdfs://### HDFS PATH ###	4770	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	3816	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	4570	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	5058	[1,2,4,8589934592,1,0]
-val_481	hdfs://### HDFS PATH ###	2434	[1,2,4,8589934592,1,0]
-val_482	hdfs://### HDFS PATH ###	586	[1,2,4,8589934592,1,0]
-val_483	hdfs://### HDFS PATH ###	4174	[1,2,4,8589934592,1,0]
-val_484	hdfs://### HDFS PATH ###	102	[1,2,4,8589934592,1,0]
-val_485	hdfs://### HDFS PATH ###	3734	[1,2,4,8589934592,1,0]
-val_487	hdfs://### HDFS PATH ###	3804	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	1128	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	1800	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	3344	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	742	[1,2,4,8589934592,1,0]
-val_490	hdfs://### HDFS PATH ###	2640	[1,2,4,8589934592,1,0]
-val_491	hdfs://### HDFS PATH ###	4710	[1,2,4,8589934592,1,0]
-val_492	hdfs://### HDFS PATH ###	3410	[1,2,4,8589934592,1,0]
-val_492	hdfs://### HDFS PATH ###	5362	[1,2,4,8589934592,1,0]
-val_493	hdfs://### HDFS PATH ###	4998	[1,2,4,8589934592,1,0]
-val_494	hdfs://### HDFS PATH ###	622	[1,2,4,8589934592,1,0]
-val_495	hdfs://### HDFS PATH ###	316	[1,2,4,8589934592,1,0]
-val_496	hdfs://### HDFS PATH ###	2076	[1,2,4,8589934592,1,0]
-val_497	hdfs://### HDFS PATH ###	3068	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	1332	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	3262	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	5418	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	3060	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	3864	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	4540	[1,2,4,8589934592,1,0]
-val_51	hdfs://### HDFS PATH ###	1462	[1,2,4,8589934592,1,0]
-val_51	hdfs://### HDFS PATH ###	2308	[1,2,4,8589934592,1,0]
-val_53	hdfs://### HDFS PATH ###	4186	[1,2,4,8589934592,1,0]
-val_54	hdfs://### HDFS PATH ###	1440	[1,2,4,8589934592,1,0]
-val_57	hdfs://### HDFS PATH ###	1024	[1,2,4,8589934592,1,0]
-val_58	hdfs://### HDFS PATH ###	1906	[1,2,4,8589934592,1,0]
-val_58	hdfs://### HDFS PATH ###	3128	[1,2,4,8589934592,1,0]
-val_64	hdfs://### HDFS PATH ###	3516	[1,2,4,8589934592,1,0]
-val_65	hdfs://### HDFS PATH ###	1592	[1,2,4,8589934592,1,0]
-val_66	hdfs://### HDFS PATH ###	198	[1,2,4,8589934592,1,0]
-val_67	hdfs://### HDFS PATH ###	1754	[1,2,4,8589934592,1,0]
-val_67	hdfs://### HDFS PATH ###	5306	[1,2,4,8589934592,1,0]
-val_69	hdfs://### HDFS PATH ###	3570	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	3794	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	4548	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	4640	[1,2,4,8589934592,1,0]
-val_72	hdfs://### HDFS PATH ###	1208	[1,2,4,8589934592,1,0]
-val_72	hdfs://### HDFS PATH ###	2792	[1,2,4,8589934592,1,0]
-val_74	hdfs://### HDFS PATH ###	3548	[1,2,4,8589934592,1,0]
-val_76	hdfs://### HDFS PATH ###	3378	[1,2,4,8589934592,1,0]
-val_76	hdfs://### HDFS PATH ###	3538	[1,2,4,8589934592,1,0]
-val_77	hdfs://### HDFS PATH ###	2622	[1,2,4,8589934592,1,0]
-val_78	hdfs://### HDFS PATH ###	3368	[1,2,4,8589934592,1,0]
-val_8	hdfs://### HDFS PATH ###	1916	[1,2,4,8589934592,1,0]
-val_80	hdfs://### HDFS PATH ###	4058	[1,2,4,8589934592,1,0]
-val_82	hdfs://### HDFS PATH ###	396	[1,2,4,8589934592,1,0]
-val_83	hdfs://### HDFS PATH ###	1674	[1,2,4,8589934592,1,0]
-val_83	hdfs://### HDFS PATH ###	5070	[1,2,4,8589934592,1,0]
-val_84	hdfs://### HDFS PATH ###	1872	[1,2,4,8589934592,1,0]
-val_84	hdfs://### HDFS PATH ###	5606	[1,2,4,8589934592,1,0]
-val_85	hdfs://### HDFS PATH ###	2612	[1,2,4,8589934592,1,0]
-val_86	hdfs://### HDFS PATH ###	12	[1,2,4,8589934592,1,0]
-val_87	hdfs://### HDFS PATH ###	2652	[1,2,4,8589934592,1,0]
-val_9	hdfs://### HDFS PATH ###	5398	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	2802	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	4304	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	5744	[1,2,4,8589934592,1,0]
-val_92	hdfs://### HDFS PATH ###	1176	[1,2,4,8589934592,1,0]
-val_95	hdfs://### HDFS PATH ###	2400	[1,2,4,8589934592,1,0]
-val_95	hdfs://### HDFS PATH ###	3160	[1,2,4,8589934592,1,0]
-val_96	hdfs://### HDFS PATH ###	2216	[1,2,4,8589934592,1,0]
-val_97	hdfs://### HDFS PATH ###	5572	[1,2,4,8589934592,1,0]
-val_97	hdfs://### HDFS PATH ###	5802	[1,2,4,8589934592,1,0]
-val_98	hdfs://### HDFS PATH ###	2458	[1,2,4,8589934592,1,0]
-val_98	hdfs://### HDFS PATH ###	92	[1,2,4,8589934592,1,0]
-PREHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-    (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-       WHERE value = "val_0") b
- ON
-   a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-    (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-       WHERE value = "val_0") b
- ON
-   a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 4 (PARTITION-LEVEL SORT, 4)
-        Reducer 3 <- Reducer 2 (GROUP, 4)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: default__src_src1_index__
-                  Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: bigint)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: array<bigint>)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: default__src_src2_index__
-                  Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: bigint)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: array<bigint>)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: string), _col1 (type: bigint)
-                  1 _col0 (type: string), _col1 (type: bigint)
-                outputColumnNames: _col0, _col1, _col2, _col5
-                Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
-                  Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: bigint)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: collect_set(_col1)
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: array<bigint>)
-        Reducer 3 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: collect_set(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-     (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-        WHERE value = "val_0") b
-  ON
-    a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-     (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-        WHERE value = "val_0") b
-  ON
-    a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	val_0
-0	val_0
-0	val_0
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	val_0
-0	val_0
-0	val_0
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
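
For context on what the golden files deleted above and below covered: they exercised Hive's legacy bitmap index lifecycle end to end (create, deferred rebuild, manual intersection of two index tables, drop). A minimal sketch of that lifecycle, assembled only from statements that appear verbatim in the removed q.out files and assuming the standard `src` sample table those tests use, looks like this; note these statements target the legacy index feature and will not parse on Hive releases where index support has since been removed:

CREATE INDEX src1_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
CREATE INDEX src2_index ON TABLE src(value) AS 'BITMAP' WITH DEFERRED REBUILD;

-- Deferred rebuild: the index tables are only populated on explicit REBUILD.
ALTER INDEX src1_index ON src REBUILD;
ALTER INDEX src2_index ON src REBUILD;

-- Intersect the two index tables to find blocks satisfying both predicates.
-- EWAH_BITMAP_AND combines the stored EWAH-compressed bitmaps and
-- EWAH_BITMAP_EMPTY filters out rows whose intersection is empty.
SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) AS `_offsets`
FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps
      FROM default__src_src1_index__ WHERE key = 0) a
JOIN (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps
      FROM default__src_src2_index__ WHERE value = "val_0") b
  ON a.bucketname = b.bucketname AND a.offset = b.offset
WHERE NOT EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps))
GROUP BY a.bucketname;

DROP INDEX src1_index ON src;
DROP INDEX src2_index ON src;

The intersection query is what the Spark plan in the deleted output below describes: two map-side scans of the index tables feeding a partition-level sort-merge join, followed by a grouped collect_set over the surviving offsets.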


[2/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
deleted file mode 100644
index 8a88678..0000000
--- a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
+++ /dev/null
@@ -1,1271 +0,0 @@
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	val_0
-0	val_0
-0	val_0
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	hdfs://### HDFS PATH ###	2088	[1,2,4,8589934592,1,0]
-0	hdfs://### HDFS PATH ###	2632	[1,2,4,8589934592,1,0]
-0	hdfs://### HDFS PATH ###	968	[1,2,4,8589934592,1,0]
-10	hdfs://### HDFS PATH ###	2846	[1,2,4,8589934592,1,0]
-100	hdfs://### HDFS PATH ###	2156	[1,2,4,8589934592,1,0]
-100	hdfs://### HDFS PATH ###	5374	[1,2,4,8589934592,1,0]
-103	hdfs://### HDFS PATH ###	1484	[1,2,4,8589934592,1,0]
-103	hdfs://### HDFS PATH ###	3614	[1,2,4,8589934592,1,0]
-104	hdfs://### HDFS PATH ###	4114	[1,2,4,8589934592,1,0]
-104	hdfs://### HDFS PATH ###	4628	[1,2,4,8589934592,1,0]
-105	hdfs://### HDFS PATH ###	4196	[1,2,4,8589934592,1,0]
-11	hdfs://### HDFS PATH ###	3170	[1,2,4,8589934592,1,0]
-111	hdfs://### HDFS PATH ###	1186	[1,2,4,8589934592,1,0]
-113	hdfs://### HDFS PATH ###	3638	[1,2,4,8589934592,1,0]
-113	hdfs://### HDFS PATH ###	920	[1,2,4,8589934592,1,0]
-114	hdfs://### HDFS PATH ###	4280	[1,2,4,8589934592,1,0]
-116	hdfs://### HDFS PATH ###	3746	[1,2,4,8589934592,1,0]
-118	hdfs://### HDFS PATH ###	2686	[1,2,4,8589934592,1,0]
-118	hdfs://### HDFS PATH ###	2780	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	2064	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	3332	[1,2,4,8589934592,1,0]
-119	hdfs://### HDFS PATH ###	4674	[1,2,4,8589934592,1,0]
-12	hdfs://### HDFS PATH ###	1720	[1,2,4,8589934592,1,0]
-12	hdfs://### HDFS PATH ###	4362	[1,2,4,8589934592,1,0]
-120	hdfs://### HDFS PATH ###	2284	[1,2,4,8589934592,1,0]
-120	hdfs://### HDFS PATH ###	4830	[1,2,4,8589934592,1,0]
-125	hdfs://### HDFS PATH ###	1344	[1,2,4,8589934592,1,0]
-125	hdfs://### HDFS PATH ###	4468	[1,2,4,8589934592,1,0]
-126	hdfs://### HDFS PATH ###	5732	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	208	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	3896	[1,2,4,8589934592,1,0]
-128	hdfs://### HDFS PATH ###	988	[1,2,4,8589934592,1,0]
-129	hdfs://### HDFS PATH ###	1094	[1,2,4,8589934592,1,0]
-129	hdfs://### HDFS PATH ###	2040	[1,2,4,8589934592,1,0]
-131	hdfs://### HDFS PATH ###	2296	[1,2,4,8589934592,1,0]
-133	hdfs://### HDFS PATH ###	5164	[1,2,4,8589934592,1,0]
-134	hdfs://### HDFS PATH ###	2698	[1,2,4,8589934592,1,0]
-134	hdfs://### HDFS PATH ###	5294	[1,2,4,8589934592,1,0]
-136	hdfs://### HDFS PATH ###	5080	[1,2,4,8589934592,1,0]
-137	hdfs://### HDFS PATH ###	1650	[1,2,4,8589934592,1,0]
-137	hdfs://### HDFS PATH ###	2552	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	1472	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	1848	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	2734	[1,2,4,8589934592,1,0]
-138	hdfs://### HDFS PATH ###	3470	[1,2,4,8589934592,1,0]
-143	hdfs://### HDFS PATH ###	3226	[1,2,4,8589934592,1,0]
-145	hdfs://### HDFS PATH ###	304	[1,2,4,8589934592,1,0]
-146	hdfs://### HDFS PATH ###	232	[1,2,4,8589934592,1,0]
-146	hdfs://### HDFS PATH ###	5430	[1,2,4,8589934592,1,0]
-149	hdfs://### HDFS PATH ###	1058	[1,2,4,8589934592,1,0]
-149	hdfs://### HDFS PATH ###	3422	[1,2,4,8589934592,1,0]
-15	hdfs://### HDFS PATH ###	2770	[1,2,4,8589934592,1,0]
-15	hdfs://### HDFS PATH ###	386	[1,2,4,8589934592,1,0]
-150	hdfs://### HDFS PATH ###	150	[1,2,4,8589934592,1,0]
-152	hdfs://### HDFS PATH ###	280	[1,2,4,8589934592,1,0]
-152	hdfs://### HDFS PATH ###	5648	[1,2,4,8589934592,1,0]
-153	hdfs://### HDFS PATH ###	502	[1,2,4,8589934592,1,0]
-155	hdfs://### HDFS PATH ###	932	[1,2,4,8589934592,1,0]
-156	hdfs://### HDFS PATH ###	2352	[1,2,4,8589934592,1,0]
-157	hdfs://### HDFS PATH ###	1140	[1,2,4,8589934592,1,0]
-158	hdfs://### HDFS PATH ###	2052	[1,2,4,8589934592,1,0]
-160	hdfs://### HDFS PATH ###	3274	[1,2,4,8589934592,1,0]
-162	hdfs://### HDFS PATH ###	754	[1,2,4,8589934592,1,0]
-163	hdfs://### HDFS PATH ###	4650	[1,2,4,8589934592,1,0]
-164	hdfs://### HDFS PATH ###	4408	[1,2,4,8589934592,1,0]
-164	hdfs://### HDFS PATH ###	4492	[1,2,4,8589934592,1,0]
-165	hdfs://### HDFS PATH ###	2236	[1,2,4,8589934592,1,0]
-165	hdfs://### HDFS PATH ###	44	[1,2,4,8589934592,1,0]
-166	hdfs://### HDFS PATH ###	418	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	3686	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	5502	[1,2,4,8589934592,1,0]
-167	hdfs://### HDFS PATH ###	874	[1,2,4,8589934592,1,0]
-168	hdfs://### HDFS PATH ###	3180	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	1308	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	2588	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	4854	[1,2,4,8589934592,1,0]
-169	hdfs://### HDFS PATH ###	5754	[1,2,4,8589934592,1,0]
-17	hdfs://### HDFS PATH ###	910	[1,2,4,8589934592,1,0]
-170	hdfs://### HDFS PATH ###	1106	[1,2,4,8589934592,1,0]
-172	hdfs://### HDFS PATH ###	2018	[1,2,4,8589934592,1,0]
-172	hdfs://### HDFS PATH ###	5104	[1,2,4,8589934592,1,0]
-174	hdfs://### HDFS PATH ###	598	[1,2,4,8589934592,1,0]
-174	hdfs://### HDFS PATH ###	682	[1,2,4,8589934592,1,0]
-175	hdfs://### HDFS PATH ###	4150	[1,2,4,8589934592,1,0]
-175	hdfs://### HDFS PATH ###	5176	[1,2,4,8589934592,1,0]
-176	hdfs://### HDFS PATH ###	1428	[1,2,4,8589934592,1,0]
-176	hdfs://### HDFS PATH ###	1556	[1,2,4,8589934592,1,0]
-177	hdfs://### HDFS PATH ###	3036	[1,2,4,8589934592,1,0]
-178	hdfs://### HDFS PATH ###	4938	[1,2,4,8589934592,1,0]
-179	hdfs://### HDFS PATH ###	2006	[1,2,4,8589934592,1,0]
-179	hdfs://### HDFS PATH ###	2674	[1,2,4,8589934592,1,0]
-18	hdfs://### HDFS PATH ###	5340	[1,2,4,8589934592,1,0]
-18	hdfs://### HDFS PATH ###	5514	[1,2,4,8589934592,1,0]
-180	hdfs://### HDFS PATH ###	1696	[1,2,4,8589934592,1,0]
-181	hdfs://### HDFS PATH ###	1742	[1,2,4,8589934592,1,0]
-183	hdfs://### HDFS PATH ###	5536	[1,2,4,8589934592,1,0]
-186	hdfs://### HDFS PATH ###	5466	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	1416	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	2492	[1,2,4,8589934592,1,0]
-187	hdfs://### HDFS PATH ###	4516	[1,2,4,8589934592,1,0]
-189	hdfs://### HDFS PATH ###	5188	[1,2,4,8589934592,1,0]
-19	hdfs://### HDFS PATH ###	2824	[1,2,4,8589934592,1,0]
-190	hdfs://### HDFS PATH ###	4244	[1,2,4,8589934592,1,0]
-191	hdfs://### HDFS PATH ###	2192	[1,2,4,8589934592,1,0]
-191	hdfs://### HDFS PATH ###	3852	[1,2,4,8589934592,1,0]
-192	hdfs://### HDFS PATH ###	1392	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	126	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	4078	[1,2,4,8589934592,1,0]
-193	hdfs://### HDFS PATH ###	514	[1,2,4,8589934592,1,0]
-194	hdfs://### HDFS PATH ###	5684	[1,2,4,8589934592,1,0]
-195	hdfs://### HDFS PATH ###	3286	[1,2,4,8589934592,1,0]
-195	hdfs://### HDFS PATH ###	886	[1,2,4,8589934592,1,0]
-196	hdfs://### HDFS PATH ###	2410	[1,2,4,8589934592,1,0]
-197	hdfs://### HDFS PATH ###	2108	[1,2,4,8589934592,1,0]
-197	hdfs://### HDFS PATH ###	2480	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	2180	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	4806	[1,2,4,8589934592,1,0]
-199	hdfs://### HDFS PATH ###	646	[1,2,4,8589934592,1,0]
-2	hdfs://### HDFS PATH ###	4004	[1,2,4,8589934592,1,0]
-20	hdfs://### HDFS PATH ###	1118	[1,2,4,8589934592,1,0]
-200	hdfs://### HDFS PATH ###	4698	[1,2,4,8589934592,1,0]
-200	hdfs://### HDFS PATH ###	5790	[1,2,4,8589934592,1,0]
-201	hdfs://### HDFS PATH ###	4384	[1,2,4,8589934592,1,0]
-202	hdfs://### HDFS PATH ###	3932	[1,2,4,8589934592,1,0]
-203	hdfs://### HDFS PATH ###	4314	[1,2,4,8589934592,1,0]
-203	hdfs://### HDFS PATH ###	944	[1,2,4,8589934592,1,0]
-205	hdfs://### HDFS PATH ###	1046	[1,2,4,8589934592,1,0]
-205	hdfs://### HDFS PATH ###	2272	[1,2,4,8589934592,1,0]
-207	hdfs://### HDFS PATH ###	5022	[1,2,4,8589934592,1,0]
-207	hdfs://### HDFS PATH ###	634	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	1272	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	1948	[1,2,4,8589934592,1,0]
-208	hdfs://### HDFS PATH ###	670	[1,2,4,8589934592,1,0]
-209	hdfs://### HDFS PATH ###	3504	[1,2,4,8589934592,1,0]
-209	hdfs://### HDFS PATH ###	374	[1,2,4,8589934592,1,0]
-213	hdfs://### HDFS PATH ###	1508	[1,2,4,8589934592,1,0]
-213	hdfs://### HDFS PATH ###	220	[1,2,4,8589934592,1,0]
-214	hdfs://### HDFS PATH ###	5116	[1,2,4,8589934592,1,0]
-216	hdfs://### HDFS PATH ###	1520	[1,2,4,8589934592,1,0]
-216	hdfs://### HDFS PATH ###	3650	[1,2,4,8589934592,1,0]
-217	hdfs://### HDFS PATH ###	1860	[1,2,4,8589934592,1,0]
-217	hdfs://### HDFS PATH ###	4396	[1,2,4,8589934592,1,0]
-218	hdfs://### HDFS PATH ###	3446	[1,2,4,8589934592,1,0]
-219	hdfs://### HDFS PATH ###	3710	[1,2,4,8589934592,1,0]
-219	hdfs://### HDFS PATH ###	478	[1,2,4,8589934592,1,0]
-221	hdfs://### HDFS PATH ###	1164	[1,2,4,8589934592,1,0]
-221	hdfs://### HDFS PATH ###	1580	[1,2,4,8589934592,1,0]
-222	hdfs://### HDFS PATH ###	5720	[1,2,4,8589934592,1,0]
-223	hdfs://### HDFS PATH ###	3398	[1,2,4,8589934592,1,0]
-223	hdfs://### HDFS PATH ###	3758	[1,2,4,8589934592,1,0]
-224	hdfs://### HDFS PATH ###	174	[1,2,4,8589934592,1,0]
-224	hdfs://### HDFS PATH ###	2892	[1,2,4,8589934592,1,0]
-226	hdfs://### HDFS PATH ###	3048	[1,2,4,8589934592,1,0]
-228	hdfs://### HDFS PATH ###	3458	[1,2,4,8589934592,1,0]
-229	hdfs://### HDFS PATH ###	3202	[1,2,4,8589934592,1,0]
-229	hdfs://### HDFS PATH ###	3956	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	1730	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	1936	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	2260	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	3580	[1,2,4,8589934592,1,0]
-230	hdfs://### HDFS PATH ###	4914	[1,2,4,8589934592,1,0]
-233	hdfs://### HDFS PATH ###	3214	[1,2,4,8589934592,1,0]
-233	hdfs://### HDFS PATH ###	5140	[1,2,4,8589934592,1,0]
-235	hdfs://### HDFS PATH ###	4046	[1,2,4,8589934592,1,0]
-237	hdfs://### HDFS PATH ###	4722	[1,2,4,8589934592,1,0]
-237	hdfs://### HDFS PATH ###	574	[1,2,4,8589934592,1,0]
-238	hdfs://### HDFS PATH ###	0	[1,2,4,8589934592,1,0]
-238	hdfs://### HDFS PATH ###	2746	[1,2,4,8589934592,1,0]
-239	hdfs://### HDFS PATH ###	1496	[1,2,4,8589934592,1,0]
-239	hdfs://### HDFS PATH ###	3722	[1,2,4,8589934592,1,0]
-24	hdfs://### HDFS PATH ###	1972	[1,2,4,8589934592,1,0]
-24	hdfs://### HDFS PATH ###	4594	[1,2,4,8589934592,1,0]
-241	hdfs://### HDFS PATH ###	1662	[1,2,4,8589934592,1,0]
-242	hdfs://### HDFS PATH ###	2940	[1,2,4,8589934592,1,0]
-242	hdfs://### HDFS PATH ###	3012	[1,2,4,8589934592,1,0]
-244	hdfs://### HDFS PATH ###	3872	[1,2,4,8589934592,1,0]
-247	hdfs://### HDFS PATH ###	718	[1,2,4,8589934592,1,0]
-248	hdfs://### HDFS PATH ###	4758	[1,2,4,8589934592,1,0]
-249	hdfs://### HDFS PATH ###	5034	[1,2,4,8589934592,1,0]
-252	hdfs://### HDFS PATH ###	454	[1,2,4,8589934592,1,0]
-255	hdfs://### HDFS PATH ###	4616	[1,2,4,8589934592,1,0]
-255	hdfs://### HDFS PATH ###	68	[1,2,4,8589934592,1,0]
-256	hdfs://### HDFS PATH ###	3770	[1,2,4,8589934592,1,0]
-256	hdfs://### HDFS PATH ###	5272	[1,2,4,8589934592,1,0]
-257	hdfs://### HDFS PATH ###	4208	[1,2,4,8589934592,1,0]
-258	hdfs://### HDFS PATH ###	4292	[1,2,4,8589934592,1,0]
-26	hdfs://### HDFS PATH ###	2226	[1,2,4,8589934592,1,0]
-26	hdfs://### HDFS PATH ###	5284	[1,2,4,8589934592,1,0]
-260	hdfs://### HDFS PATH ###	1764	[1,2,4,8589934592,1,0]
-262	hdfs://### HDFS PATH ###	4326	[1,2,4,8589934592,1,0]
-263	hdfs://### HDFS PATH ###	3782	[1,2,4,8589934592,1,0]
-265	hdfs://### HDFS PATH ###	114	[1,2,4,8589934592,1,0]
-265	hdfs://### HDFS PATH ###	5046	[1,2,4,8589934592,1,0]
-266	hdfs://### HDFS PATH ###	814	[1,2,4,8589934592,1,0]
-27	hdfs://### HDFS PATH ###	34	[1,2,4,8589934592,1,0]
-272	hdfs://### HDFS PATH ###	1836	[1,2,4,8589934592,1,0]
-272	hdfs://### HDFS PATH ###	2976	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	162	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	2868	[1,2,4,8589934592,1,0]
-273	hdfs://### HDFS PATH ###	5524	[1,2,4,8589934592,1,0]
-274	hdfs://### HDFS PATH ###	3698	[1,2,4,8589934592,1,0]
-275	hdfs://### HDFS PATH ###	1638	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	1260	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	2856	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	362	[1,2,4,8589934592,1,0]
-277	hdfs://### HDFS PATH ###	4902	[1,2,4,8589934592,1,0]
-278	hdfs://### HDFS PATH ###	1544	[1,2,4,8589934592,1,0]
-278	hdfs://### HDFS PATH ###	80	[1,2,4,8589934592,1,0]
-28	hdfs://### HDFS PATH ###	5616	[1,2,4,8589934592,1,0]
-280	hdfs://### HDFS PATH ###	1226	[1,2,4,8589934592,1,0]
-280	hdfs://### HDFS PATH ###	3992	[1,2,4,8589934592,1,0]
-281	hdfs://### HDFS PATH ###	350	[1,2,4,8589934592,1,0]
-281	hdfs://### HDFS PATH ###	5548	[1,2,4,8589934592,1,0]
-282	hdfs://### HDFS PATH ###	2468	[1,2,4,8589934592,1,0]
-282	hdfs://### HDFS PATH ###	2722	[1,2,4,8589934592,1,0]
-283	hdfs://### HDFS PATH ###	4022	[1,2,4,8589934592,1,0]
-284	hdfs://### HDFS PATH ###	1708	[1,2,4,8589934592,1,0]
-285	hdfs://### HDFS PATH ###	5478	[1,2,4,8589934592,1,0]
-286	hdfs://### HDFS PATH ###	1404	[1,2,4,8589934592,1,0]
-287	hdfs://### HDFS PATH ###	490	[1,2,4,8589934592,1,0]
-288	hdfs://### HDFS PATH ###	2422	[1,2,4,8589934592,1,0]
-288	hdfs://### HDFS PATH ###	3840	[1,2,4,8589934592,1,0]
-289	hdfs://### HDFS PATH ###	1568	[1,2,4,8589934592,1,0]
-291	hdfs://### HDFS PATH ###	4582	[1,2,4,8589934592,1,0]
-292	hdfs://### HDFS PATH ###	466	[1,2,4,8589934592,1,0]
-296	hdfs://### HDFS PATH ###	3626	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	2168	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	4456	[1,2,4,8589934592,1,0]
-298	hdfs://### HDFS PATH ###	5386	[1,2,4,8589934592,1,0]
-30	hdfs://### HDFS PATH ###	3494	[1,2,4,8589934592,1,0]
-302	hdfs://### HDFS PATH ###	1034	[1,2,4,8589934592,1,0]
-305	hdfs://### HDFS PATH ###	4782	[1,2,4,8589934592,1,0]
-306	hdfs://### HDFS PATH ###	2880	[1,2,4,8589934592,1,0]
-307	hdfs://### HDFS PATH ###	2812	[1,2,4,8589934592,1,0]
-307	hdfs://### HDFS PATH ###	5672	[1,2,4,8589934592,1,0]
-308	hdfs://### HDFS PATH ###	2388	[1,2,4,8589934592,1,0]
-309	hdfs://### HDFS PATH ###	2904	[1,2,4,8589934592,1,0]
-309	hdfs://### HDFS PATH ###	790	[1,2,4,8589934592,1,0]
-310	hdfs://### HDFS PATH ###	4962	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	1000	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	1626	[1,2,4,8589934592,1,0]
-311	hdfs://### HDFS PATH ###	22	[1,2,4,8589934592,1,0]
-315	hdfs://### HDFS PATH ###	5594	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	1012	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	2576	[1,2,4,8589934592,1,0]
-316	hdfs://### HDFS PATH ###	3944	[1,2,4,8589934592,1,0]
-317	hdfs://### HDFS PATH ###	3104	[1,2,4,8589934592,1,0]
-317	hdfs://### HDFS PATH ###	4974	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	1602	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	2504	[1,2,4,8589934592,1,0]
-318	hdfs://### HDFS PATH ###	2516	[1,2,4,8589934592,1,0]
-321	hdfs://### HDFS PATH ###	3308	[1,2,4,8589934592,1,0]
-321	hdfs://### HDFS PATH ###	4090	[1,2,4,8589934592,1,0]
-322	hdfs://### HDFS PATH ###	2096	[1,2,4,8589934592,1,0]
-322	hdfs://### HDFS PATH ###	3250	[1,2,4,8589934592,1,0]
-323	hdfs://### HDFS PATH ###	4878	[1,2,4,8589934592,1,0]
-325	hdfs://### HDFS PATH ###	4890	[1,2,4,8589934592,1,0]
-325	hdfs://### HDFS PATH ###	862	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	2248	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	2928	[1,2,4,8589934592,1,0]
-327	hdfs://### HDFS PATH ###	338	[1,2,4,8589934592,1,0]
-33	hdfs://### HDFS PATH ###	3592	[1,2,4,8589934592,1,0]
-331	hdfs://### HDFS PATH ###	2988	[1,2,4,8589934592,1,0]
-331	hdfs://### HDFS PATH ###	4034	[1,2,4,8589934592,1,0]
-332	hdfs://### HDFS PATH ###	1614	[1,2,4,8589934592,1,0]
-333	hdfs://### HDFS PATH ###	1684	[1,2,4,8589934592,1,0]
-333	hdfs://### HDFS PATH ###	4986	[1,2,4,8589934592,1,0]
-335	hdfs://### HDFS PATH ###	4102	[1,2,4,8589934592,1,0]
-336	hdfs://### HDFS PATH ###	3148	[1,2,4,8589934592,1,0]
-338	hdfs://### HDFS PATH ###	526	[1,2,4,8589934592,1,0]
-339	hdfs://### HDFS PATH ###	956	[1,2,4,8589934592,1,0]
-34	hdfs://### HDFS PATH ###	3192	[1,2,4,8589934592,1,0]
-341	hdfs://### HDFS PATH ###	5406	[1,2,4,8589934592,1,0]
-342	hdfs://### HDFS PATH ###	3558	[1,2,4,8589934592,1,0]
-342	hdfs://### HDFS PATH ###	838	[1,2,4,8589934592,1,0]
-344	hdfs://### HDFS PATH ###	3674	[1,2,4,8589934592,1,0]
-344	hdfs://### HDFS PATH ###	5560	[1,2,4,8589934592,1,0]
-345	hdfs://### HDFS PATH ###	1082	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	1882	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	1960	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	4338	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	5490	[1,2,4,8589934592,1,0]
-348	hdfs://### HDFS PATH ###	5660	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	1238	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	3138	[1,2,4,8589934592,1,0]
-35	hdfs://### HDFS PATH ###	4012	[1,2,4,8589934592,1,0]
-351	hdfs://### HDFS PATH ###	4604	[1,2,4,8589934592,1,0]
-353	hdfs://### HDFS PATH ###	1812	[1,2,4,8589934592,1,0]
-353	hdfs://### HDFS PATH ###	5092	[1,2,4,8589934592,1,0]
-356	hdfs://### HDFS PATH ###	1284	[1,2,4,8589934592,1,0]
-360	hdfs://### HDFS PATH ###	4746	[1,2,4,8589934592,1,0]
-362	hdfs://### HDFS PATH ###	5454	[1,2,4,8589934592,1,0]
-364	hdfs://### HDFS PATH ###	2662	[1,2,4,8589934592,1,0]
-365	hdfs://### HDFS PATH ###	802	[1,2,4,8589934592,1,0]
-366	hdfs://### HDFS PATH ###	4138	[1,2,4,8589934592,1,0]
-367	hdfs://### HDFS PATH ###	3662	[1,2,4,8589934592,1,0]
-367	hdfs://### HDFS PATH ###	850	[1,2,4,8589934592,1,0]
-368	hdfs://### HDFS PATH ###	3602	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	186	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	2564	[1,2,4,8589934592,1,0]
-369	hdfs://### HDFS PATH ###	2952	[1,2,4,8589934592,1,0]
-37	hdfs://### HDFS PATH ###	328	[1,2,4,8589934592,1,0]
-37	hdfs://### HDFS PATH ###	5626	[1,2,4,8589934592,1,0]
-373	hdfs://### HDFS PATH ###	1824	[1,2,4,8589934592,1,0]
-374	hdfs://### HDFS PATH ###	268	[1,2,4,8589934592,1,0]
-375	hdfs://### HDFS PATH ###	5212	[1,2,4,8589934592,1,0]
-377	hdfs://### HDFS PATH ###	766	[1,2,4,8589934592,1,0]
-378	hdfs://### HDFS PATH ###	1152	[1,2,4,8589934592,1,0]
-379	hdfs://### HDFS PATH ###	5328	[1,2,4,8589934592,1,0]
-382	hdfs://### HDFS PATH ###	1320	[1,2,4,8589934592,1,0]
-382	hdfs://### HDFS PATH ###	4528	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	1788	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	5260	[1,2,4,8589934592,1,0]
-384	hdfs://### HDFS PATH ###	5316	[1,2,4,8589934592,1,0]
-386	hdfs://### HDFS PATH ###	1356	[1,2,4,8589934592,1,0]
-389	hdfs://### HDFS PATH ###	2916	[1,2,4,8589934592,1,0]
-392	hdfs://### HDFS PATH ###	2964	[1,2,4,8589934592,1,0]
-393	hdfs://### HDFS PATH ###	2132	[1,2,4,8589934592,1,0]
-394	hdfs://### HDFS PATH ###	562	[1,2,4,8589934592,1,0]
-395	hdfs://### HDFS PATH ###	2710	[1,2,4,8589934592,1,0]
-395	hdfs://### HDFS PATH ###	3116	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	3092	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	4372	[1,2,4,8589934592,1,0]
-396	hdfs://### HDFS PATH ###	706	[1,2,4,8589934592,1,0]
-397	hdfs://### HDFS PATH ###	4558	[1,2,4,8589934592,1,0]
-397	hdfs://### HDFS PATH ###	778	[1,2,4,8589934592,1,0]
-399	hdfs://### HDFS PATH ###	1296	[1,2,4,8589934592,1,0]
-399	hdfs://### HDFS PATH ###	694	[1,2,4,8589934592,1,0]
-4	hdfs://### HDFS PATH ###	1218	[1,2,4,8589934592,1,0]
-400	hdfs://### HDFS PATH ###	5778	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	138	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	3000	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	3828	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	4268	[1,2,4,8589934592,1,0]
-401	hdfs://### HDFS PATH ###	5224	[1,2,4,8589934592,1,0]
-402	hdfs://### HDFS PATH ###	3080	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	406	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	4162	[1,2,4,8589934592,1,0]
-403	hdfs://### HDFS PATH ###	5766	[1,2,4,8589934592,1,0]
-404	hdfs://### HDFS PATH ###	1776	[1,2,4,8589934592,1,0]
-404	hdfs://### HDFS PATH ###	2318	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	244	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	4220	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	4256	[1,2,4,8589934592,1,0]
-406	hdfs://### HDFS PATH ###	5152	[1,2,4,8589934592,1,0]
-407	hdfs://### HDFS PATH ###	5248	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	2528	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	4232	[1,2,4,8589934592,1,0]
-409	hdfs://### HDFS PATH ###	56	[1,2,4,8589934592,1,0]
-41	hdfs://### HDFS PATH ###	3388	[1,2,4,8589934592,1,0]
-411	hdfs://### HDFS PATH ###	1924	[1,2,4,8589934592,1,0]
-413	hdfs://### HDFS PATH ###	2600	[1,2,4,8589934592,1,0]
-413	hdfs://### HDFS PATH ###	610	[1,2,4,8589934592,1,0]
-414	hdfs://### HDFS PATH ###	4686	[1,2,4,8589934592,1,0]
-414	hdfs://### HDFS PATH ###	5696	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	430	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	4794	[1,2,4,8589934592,1,0]
-417	hdfs://### HDFS PATH ###	730	[1,2,4,8589934592,1,0]
-418	hdfs://### HDFS PATH ###	2204	[1,2,4,8589934592,1,0]
-419	hdfs://### HDFS PATH ###	2758	[1,2,4,8589934592,1,0]
-42	hdfs://### HDFS PATH ###	2030	[1,2,4,8589934592,1,0]
-42	hdfs://### HDFS PATH ###	3298	[1,2,4,8589934592,1,0]
-421	hdfs://### HDFS PATH ###	5236	[1,2,4,8589934592,1,0]
-424	hdfs://### HDFS PATH ###	4350	[1,2,4,8589934592,1,0]
-424	hdfs://### HDFS PATH ###	4504	[1,2,4,8589934592,1,0]
-427	hdfs://### HDFS PATH ###	1248	[1,2,4,8589934592,1,0]
-429	hdfs://### HDFS PATH ###	256	[1,2,4,8589934592,1,0]
-429	hdfs://### HDFS PATH ###	4842	[1,2,4,8589934592,1,0]
-43	hdfs://### HDFS PATH ###	2330	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	1532	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	3320	[1,2,4,8589934592,1,0]
-430	hdfs://### HDFS PATH ###	442	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	1994	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	4420	[1,2,4,8589934592,1,0]
-431	hdfs://### HDFS PATH ###	4480	[1,2,4,8589934592,1,0]
-432	hdfs://### HDFS PATH ###	3920	[1,2,4,8589934592,1,0]
-435	hdfs://### HDFS PATH ###	2834	[1,2,4,8589934592,1,0]
-436	hdfs://### HDFS PATH ###	2340	[1,2,4,8589934592,1,0]
-437	hdfs://### HDFS PATH ###	1368	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	1070	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	3884	[1,2,4,8589934592,1,0]
-438	hdfs://### HDFS PATH ###	4662	[1,2,4,8589934592,1,0]
-439	hdfs://### HDFS PATH ###	4734	[1,2,4,8589934592,1,0]
-439	hdfs://### HDFS PATH ###	826	[1,2,4,8589934592,1,0]
-44	hdfs://### HDFS PATH ###	4068	[1,2,4,8589934592,1,0]
-443	hdfs://### HDFS PATH ###	4866	[1,2,4,8589934592,1,0]
-444	hdfs://### HDFS PATH ###	4818	[1,2,4,8589934592,1,0]
-446	hdfs://### HDFS PATH ###	538	[1,2,4,8589934592,1,0]
-448	hdfs://### HDFS PATH ###	5636	[1,2,4,8589934592,1,0]
-449	hdfs://### HDFS PATH ###	3434	[1,2,4,8589934592,1,0]
-452	hdfs://### HDFS PATH ###	3024	[1,2,4,8589934592,1,0]
-453	hdfs://### HDFS PATH ###	3482	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	2144	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	4432	[1,2,4,8589934592,1,0]
-454	hdfs://### HDFS PATH ###	5200	[1,2,4,8589934592,1,0]
-455	hdfs://### HDFS PATH ###	976	[1,2,4,8589934592,1,0]
-457	hdfs://### HDFS PATH ###	2446	[1,2,4,8589934592,1,0]
-458	hdfs://### HDFS PATH ###	3356	[1,2,4,8589934592,1,0]
-458	hdfs://### HDFS PATH ###	5442	[1,2,4,8589934592,1,0]
-459	hdfs://### HDFS PATH ###	1450	[1,2,4,8589934592,1,0]
-459	hdfs://### HDFS PATH ###	550	[1,2,4,8589934592,1,0]
-460	hdfs://### HDFS PATH ###	5010	[1,2,4,8589934592,1,0]
-462	hdfs://### HDFS PATH ###	5128	[1,2,4,8589934592,1,0]
-462	hdfs://### HDFS PATH ###	5350	[1,2,4,8589934592,1,0]
-463	hdfs://### HDFS PATH ###	1982	[1,2,4,8589934592,1,0]
-463	hdfs://### HDFS PATH ###	3980	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	1894	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	4126	[1,2,4,8589934592,1,0]
-466	hdfs://### HDFS PATH ###	658	[1,2,4,8589934592,1,0]
-467	hdfs://### HDFS PATH ###	3908	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	2120	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	2376	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	3526	[1,2,4,8589934592,1,0]
-468	hdfs://### HDFS PATH ###	4950	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	1380	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	2364	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	292	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	3968	[1,2,4,8589934592,1,0]
-469	hdfs://### HDFS PATH ###	5582	[1,2,4,8589934592,1,0]
-47	hdfs://### HDFS PATH ###	1198	[1,2,4,8589934592,1,0]
-470	hdfs://### HDFS PATH ###	2540	[1,2,4,8589934592,1,0]
-472	hdfs://### HDFS PATH ###	3238	[1,2,4,8589934592,1,0]
-475	hdfs://### HDFS PATH ###	898	[1,2,4,8589934592,1,0]
-477	hdfs://### HDFS PATH ###	5708	[1,2,4,8589934592,1,0]
-478	hdfs://### HDFS PATH ###	4444	[1,2,4,8589934592,1,0]
-478	hdfs://### HDFS PATH ###	4926	[1,2,4,8589934592,1,0]
-479	hdfs://### HDFS PATH ###	4770	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	3816	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	4570	[1,2,4,8589934592,1,0]
-480	hdfs://### HDFS PATH ###	5058	[1,2,4,8589934592,1,0]
-481	hdfs://### HDFS PATH ###	2434	[1,2,4,8589934592,1,0]
-482	hdfs://### HDFS PATH ###	586	[1,2,4,8589934592,1,0]
-483	hdfs://### HDFS PATH ###	4174	[1,2,4,8589934592,1,0]
-484	hdfs://### HDFS PATH ###	102	[1,2,4,8589934592,1,0]
-485	hdfs://### HDFS PATH ###	3734	[1,2,4,8589934592,1,0]
-487	hdfs://### HDFS PATH ###	3804	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	1128	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	1800	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	3344	[1,2,4,8589934592,1,0]
-489	hdfs://### HDFS PATH ###	742	[1,2,4,8589934592,1,0]
-490	hdfs://### HDFS PATH ###	2640	[1,2,4,8589934592,1,0]
-491	hdfs://### HDFS PATH ###	4710	[1,2,4,8589934592,1,0]
-492	hdfs://### HDFS PATH ###	3410	[1,2,4,8589934592,1,0]
-492	hdfs://### HDFS PATH ###	5362	[1,2,4,8589934592,1,0]
-493	hdfs://### HDFS PATH ###	4998	[1,2,4,8589934592,1,0]
-494	hdfs://### HDFS PATH ###	622	[1,2,4,8589934592,1,0]
-495	hdfs://### HDFS PATH ###	316	[1,2,4,8589934592,1,0]
-496	hdfs://### HDFS PATH ###	2076	[1,2,4,8589934592,1,0]
-497	hdfs://### HDFS PATH ###	3068	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	1332	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	3262	[1,2,4,8589934592,1,0]
-498	hdfs://### HDFS PATH ###	5418	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	3060	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	3864	[1,2,4,8589934592,1,0]
-5	hdfs://### HDFS PATH ###	4540	[1,2,4,8589934592,1,0]
-51	hdfs://### HDFS PATH ###	1462	[1,2,4,8589934592,1,0]
-51	hdfs://### HDFS PATH ###	2308	[1,2,4,8589934592,1,0]
-53	hdfs://### HDFS PATH ###	4186	[1,2,4,8589934592,1,0]
-54	hdfs://### HDFS PATH ###	1440	[1,2,4,8589934592,1,0]
-57	hdfs://### HDFS PATH ###	1024	[1,2,4,8589934592,1,0]
-58	hdfs://### HDFS PATH ###	1906	[1,2,4,8589934592,1,0]
-58	hdfs://### HDFS PATH ###	3128	[1,2,4,8589934592,1,0]
-64	hdfs://### HDFS PATH ###	3516	[1,2,4,8589934592,1,0]
-65	hdfs://### HDFS PATH ###	1592	[1,2,4,8589934592,1,0]
-66	hdfs://### HDFS PATH ###	198	[1,2,4,8589934592,1,0]
-67	hdfs://### HDFS PATH ###	1754	[1,2,4,8589934592,1,0]
-67	hdfs://### HDFS PATH ###	5306	[1,2,4,8589934592,1,0]
-69	hdfs://### HDFS PATH ###	3570	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	3794	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	4548	[1,2,4,8589934592,1,0]
-70	hdfs://### HDFS PATH ###	4640	[1,2,4,8589934592,1,0]
-72	hdfs://### HDFS PATH ###	1208	[1,2,4,8589934592,1,0]
-72	hdfs://### HDFS PATH ###	2792	[1,2,4,8589934592,1,0]
-74	hdfs://### HDFS PATH ###	3548	[1,2,4,8589934592,1,0]
-76	hdfs://### HDFS PATH ###	3378	[1,2,4,8589934592,1,0]
-76	hdfs://### HDFS PATH ###	3538	[1,2,4,8589934592,1,0]
-77	hdfs://### HDFS PATH ###	2622	[1,2,4,8589934592,1,0]
-78	hdfs://### HDFS PATH ###	3368	[1,2,4,8589934592,1,0]
-8	hdfs://### HDFS PATH ###	1916	[1,2,4,8589934592,1,0]
-80	hdfs://### HDFS PATH ###	4058	[1,2,4,8589934592,1,0]
-82	hdfs://### HDFS PATH ###	396	[1,2,4,8589934592,1,0]
-83	hdfs://### HDFS PATH ###	1674	[1,2,4,8589934592,1,0]
-83	hdfs://### HDFS PATH ###	5070	[1,2,4,8589934592,1,0]
-84	hdfs://### HDFS PATH ###	1872	[1,2,4,8589934592,1,0]
-84	hdfs://### HDFS PATH ###	5606	[1,2,4,8589934592,1,0]
-85	hdfs://### HDFS PATH ###	2612	[1,2,4,8589934592,1,0]
-86	hdfs://### HDFS PATH ###	12	[1,2,4,8589934592,1,0]
-87	hdfs://### HDFS PATH ###	2652	[1,2,4,8589934592,1,0]
-9	hdfs://### HDFS PATH ###	5398	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	2802	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	4304	[1,2,4,8589934592,1,0]
-90	hdfs://### HDFS PATH ###	5744	[1,2,4,8589934592,1,0]
-92	hdfs://### HDFS PATH ###	1176	[1,2,4,8589934592,1,0]
-95	hdfs://### HDFS PATH ###	2400	[1,2,4,8589934592,1,0]
-95	hdfs://### HDFS PATH ###	3160	[1,2,4,8589934592,1,0]
-96	hdfs://### HDFS PATH ###	2216	[1,2,4,8589934592,1,0]
-97	hdfs://### HDFS PATH ###	5572	[1,2,4,8589934592,1,0]
-97	hdfs://### HDFS PATH ###	5802	[1,2,4,8589934592,1,0]
-98	hdfs://### HDFS PATH ###	2458	[1,2,4,8589934592,1,0]
-98	hdfs://### HDFS PATH ###	92	[1,2,4,8589934592,1,0]
-PREHOOK: query: SELECT * FROM default__src_src2_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src2_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src2_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src2_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-val_0	hdfs://### HDFS PATH ###	2088	[1,2,4,8589934592,1,0]
-val_0	hdfs://### HDFS PATH ###	2632	[1,2,4,8589934592,1,0]
-val_0	hdfs://### HDFS PATH ###	968	[1,2,4,8589934592,1,0]
-val_10	hdfs://### HDFS PATH ###	2846	[1,2,4,8589934592,1,0]
-val_100	hdfs://### HDFS PATH ###	2156	[1,2,4,8589934592,1,0]
-val_100	hdfs://### HDFS PATH ###	5374	[1,2,4,8589934592,1,0]
-val_103	hdfs://### HDFS PATH ###	1484	[1,2,4,8589934592,1,0]
-val_103	hdfs://### HDFS PATH ###	3614	[1,2,4,8589934592,1,0]
-val_104	hdfs://### HDFS PATH ###	4114	[1,2,4,8589934592,1,0]
-val_104	hdfs://### HDFS PATH ###	4628	[1,2,4,8589934592,1,0]
-val_105	hdfs://### HDFS PATH ###	4196	[1,2,4,8589934592,1,0]
-val_11	hdfs://### HDFS PATH ###	3170	[1,2,4,8589934592,1,0]
-val_111	hdfs://### HDFS PATH ###	1186	[1,2,4,8589934592,1,0]
-val_113	hdfs://### HDFS PATH ###	3638	[1,2,4,8589934592,1,0]
-val_113	hdfs://### HDFS PATH ###	920	[1,2,4,8589934592,1,0]
-val_114	hdfs://### HDFS PATH ###	4280	[1,2,4,8589934592,1,0]
-val_116	hdfs://### HDFS PATH ###	3746	[1,2,4,8589934592,1,0]
-val_118	hdfs://### HDFS PATH ###	2686	[1,2,4,8589934592,1,0]
-val_118	hdfs://### HDFS PATH ###	2780	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	2064	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	3332	[1,2,4,8589934592,1,0]
-val_119	hdfs://### HDFS PATH ###	4674	[1,2,4,8589934592,1,0]
-val_12	hdfs://### HDFS PATH ###	1720	[1,2,4,8589934592,1,0]
-val_12	hdfs://### HDFS PATH ###	4362	[1,2,4,8589934592,1,0]
-val_120	hdfs://### HDFS PATH ###	2284	[1,2,4,8589934592,1,0]
-val_120	hdfs://### HDFS PATH ###	4830	[1,2,4,8589934592,1,0]
-val_125	hdfs://### HDFS PATH ###	1344	[1,2,4,8589934592,1,0]
-val_125	hdfs://### HDFS PATH ###	4468	[1,2,4,8589934592,1,0]
-val_126	hdfs://### HDFS PATH ###	5732	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	208	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	3896	[1,2,4,8589934592,1,0]
-val_128	hdfs://### HDFS PATH ###	988	[1,2,4,8589934592,1,0]
-val_129	hdfs://### HDFS PATH ###	1094	[1,2,4,8589934592,1,0]
-val_129	hdfs://### HDFS PATH ###	2040	[1,2,4,8589934592,1,0]
-val_131	hdfs://### HDFS PATH ###	2296	[1,2,4,8589934592,1,0]
-val_133	hdfs://### HDFS PATH ###	5164	[1,2,4,8589934592,1,0]
-val_134	hdfs://### HDFS PATH ###	2698	[1,2,4,8589934592,1,0]
-val_134	hdfs://### HDFS PATH ###	5294	[1,2,4,8589934592,1,0]
-val_136	hdfs://### HDFS PATH ###	5080	[1,2,4,8589934592,1,0]
-val_137	hdfs://### HDFS PATH ###	1650	[1,2,4,8589934592,1,0]
-val_137	hdfs://### HDFS PATH ###	2552	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	1472	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	1848	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	2734	[1,2,4,8589934592,1,0]
-val_138	hdfs://### HDFS PATH ###	3470	[1,2,4,8589934592,1,0]
-val_143	hdfs://### HDFS PATH ###	3226	[1,2,4,8589934592,1,0]
-val_145	hdfs://### HDFS PATH ###	304	[1,2,4,8589934592,1,0]
-val_146	hdfs://### HDFS PATH ###	232	[1,2,4,8589934592,1,0]
-val_146	hdfs://### HDFS PATH ###	5430	[1,2,4,8589934592,1,0]
-val_149	hdfs://### HDFS PATH ###	1058	[1,2,4,8589934592,1,0]
-val_149	hdfs://### HDFS PATH ###	3422	[1,2,4,8589934592,1,0]
-val_15	hdfs://### HDFS PATH ###	2770	[1,2,4,8589934592,1,0]
-val_15	hdfs://### HDFS PATH ###	386	[1,2,4,8589934592,1,0]
-val_150	hdfs://### HDFS PATH ###	150	[1,2,4,8589934592,1,0]
-val_152	hdfs://### HDFS PATH ###	280	[1,2,4,8589934592,1,0]
-val_152	hdfs://### HDFS PATH ###	5648	[1,2,4,8589934592,1,0]
-val_153	hdfs://### HDFS PATH ###	502	[1,2,4,8589934592,1,0]
-val_155	hdfs://### HDFS PATH ###	932	[1,2,4,8589934592,1,0]
-val_156	hdfs://### HDFS PATH ###	2352	[1,2,4,8589934592,1,0]
-val_157	hdfs://### HDFS PATH ###	1140	[1,2,4,8589934592,1,0]
-val_158	hdfs://### HDFS PATH ###	2052	[1,2,4,8589934592,1,0]
-val_160	hdfs://### HDFS PATH ###	3274	[1,2,4,8589934592,1,0]
-val_162	hdfs://### HDFS PATH ###	754	[1,2,4,8589934592,1,0]
-val_163	hdfs://### HDFS PATH ###	4650	[1,2,4,8589934592,1,0]
-val_164	hdfs://### HDFS PATH ###	4408	[1,2,4,8589934592,1,0]
-val_164	hdfs://### HDFS PATH ###	4492	[1,2,4,8589934592,1,0]
-val_165	hdfs://### HDFS PATH ###	2236	[1,2,4,8589934592,1,0]
-val_165	hdfs://### HDFS PATH ###	44	[1,2,4,8589934592,1,0]
-val_166	hdfs://### HDFS PATH ###	418	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	3686	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	5502	[1,2,4,8589934592,1,0]
-val_167	hdfs://### HDFS PATH ###	874	[1,2,4,8589934592,1,0]
-val_168	hdfs://### HDFS PATH ###	3180	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	1308	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	2588	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	4854	[1,2,4,8589934592,1,0]
-val_169	hdfs://### HDFS PATH ###	5754	[1,2,4,8589934592,1,0]
-val_17	hdfs://### HDFS PATH ###	910	[1,2,4,8589934592,1,0]
-val_170	hdfs://### HDFS PATH ###	1106	[1,2,4,8589934592,1,0]
-val_172	hdfs://### HDFS PATH ###	2018	[1,2,4,8589934592,1,0]
-val_172	hdfs://### HDFS PATH ###	5104	[1,2,4,8589934592,1,0]
-val_174	hdfs://### HDFS PATH ###	598	[1,2,4,8589934592,1,0]
-val_174	hdfs://### HDFS PATH ###	682	[1,2,4,8589934592,1,0]
-val_175	hdfs://### HDFS PATH ###	4150	[1,2,4,8589934592,1,0]
-val_175	hdfs://### HDFS PATH ###	5176	[1,2,4,8589934592,1,0]
-val_176	hdfs://### HDFS PATH ###	1428	[1,2,4,8589934592,1,0]
-val_176	hdfs://### HDFS PATH ###	1556	[1,2,4,8589934592,1,0]
-val_177	hdfs://### HDFS PATH ###	3036	[1,2,4,8589934592,1,0]
-val_178	hdfs://### HDFS PATH ###	4938	[1,2,4,8589934592,1,0]
-val_179	hdfs://### HDFS PATH ###	2006	[1,2,4,8589934592,1,0]
-val_179	hdfs://### HDFS PATH ###	2674	[1,2,4,8589934592,1,0]
-val_18	hdfs://### HDFS PATH ###	5340	[1,2,4,8589934592,1,0]
-val_18	hdfs://### HDFS PATH ###	5514	[1,2,4,8589934592,1,0]
-val_180	hdfs://### HDFS PATH ###	1696	[1,2,4,8589934592,1,0]
-val_181	hdfs://### HDFS PATH ###	1742	[1,2,4,8589934592,1,0]
-val_183	hdfs://### HDFS PATH ###	5536	[1,2,4,8589934592,1,0]
-val_186	hdfs://### HDFS PATH ###	5466	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	1416	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	2492	[1,2,4,8589934592,1,0]
-val_187	hdfs://### HDFS PATH ###	4516	[1,2,4,8589934592,1,0]
-val_189	hdfs://### HDFS PATH ###	5188	[1,2,4,8589934592,1,0]
-val_19	hdfs://### HDFS PATH ###	2824	[1,2,4,8589934592,1,0]
-val_190	hdfs://### HDFS PATH ###	4244	[1,2,4,8589934592,1,0]
-val_191	hdfs://### HDFS PATH ###	2192	[1,2,4,8589934592,1,0]
-val_191	hdfs://### HDFS PATH ###	3852	[1,2,4,8589934592,1,0]
-val_192	hdfs://### HDFS PATH ###	1392	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	126	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	4078	[1,2,4,8589934592,1,0]
-val_193	hdfs://### HDFS PATH ###	514	[1,2,4,8589934592,1,0]
-val_194	hdfs://### HDFS PATH ###	5684	[1,2,4,8589934592,1,0]
-val_195	hdfs://### HDFS PATH ###	3286	[1,2,4,8589934592,1,0]
-val_195	hdfs://### HDFS PATH ###	886	[1,2,4,8589934592,1,0]
-val_196	hdfs://### HDFS PATH ###	2410	[1,2,4,8589934592,1,0]
-val_197	hdfs://### HDFS PATH ###	2108	[1,2,4,8589934592,1,0]
-val_197	hdfs://### HDFS PATH ###	2480	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	2180	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	4806	[1,2,4,8589934592,1,0]
-val_199	hdfs://### HDFS PATH ###	646	[1,2,4,8589934592,1,0]
-val_2	hdfs://### HDFS PATH ###	4004	[1,2,4,8589934592,1,0]
-val_20	hdfs://### HDFS PATH ###	1118	[1,2,4,8589934592,1,0]
-val_200	hdfs://### HDFS PATH ###	4698	[1,2,4,8589934592,1,0]
-val_200	hdfs://### HDFS PATH ###	5790	[1,2,4,8589934592,1,0]
-val_201	hdfs://### HDFS PATH ###	4384	[1,2,4,8589934592,1,0]
-val_202	hdfs://### HDFS PATH ###	3932	[1,2,4,8589934592,1,0]
-val_203	hdfs://### HDFS PATH ###	4314	[1,2,4,8589934592,1,0]
-val_203	hdfs://### HDFS PATH ###	944	[1,2,4,8589934592,1,0]
-val_205	hdfs://### HDFS PATH ###	1046	[1,2,4,8589934592,1,0]
-val_205	hdfs://### HDFS PATH ###	2272	[1,2,4,8589934592,1,0]
-val_207	hdfs://### HDFS PATH ###	5022	[1,2,4,8589934592,1,0]
-val_207	hdfs://### HDFS PATH ###	634	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	1272	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	1948	[1,2,4,8589934592,1,0]
-val_208	hdfs://### HDFS PATH ###	670	[1,2,4,8589934592,1,0]
-val_209	hdfs://### HDFS PATH ###	3504	[1,2,4,8589934592,1,0]
-val_209	hdfs://### HDFS PATH ###	374	[1,2,4,8589934592,1,0]
-val_213	hdfs://### HDFS PATH ###	1508	[1,2,4,8589934592,1,0]
-val_213	hdfs://### HDFS PATH ###	220	[1,2,4,8589934592,1,0]
-val_214	hdfs://### HDFS PATH ###	5116	[1,2,4,8589934592,1,0]
-val_216	hdfs://### HDFS PATH ###	1520	[1,2,4,8589934592,1,0]
-val_216	hdfs://### HDFS PATH ###	3650	[1,2,4,8589934592,1,0]
-val_217	hdfs://### HDFS PATH ###	1860	[1,2,4,8589934592,1,0]
-val_217	hdfs://### HDFS PATH ###	4396	[1,2,4,8589934592,1,0]
-val_218	hdfs://### HDFS PATH ###	3446	[1,2,4,8589934592,1,0]
-val_219	hdfs://### HDFS PATH ###	3710	[1,2,4,8589934592,1,0]
-val_219	hdfs://### HDFS PATH ###	478	[1,2,4,8589934592,1,0]
-val_221	hdfs://### HDFS PATH ###	1164	[1,2,4,8589934592,1,0]
-val_221	hdfs://### HDFS PATH ###	1580	[1,2,4,8589934592,1,0]
-val_222	hdfs://### HDFS PATH ###	5720	[1,2,4,8589934592,1,0]
-val_223	hdfs://### HDFS PATH ###	3398	[1,2,4,8589934592,1,0]
-val_223	hdfs://### HDFS PATH ###	3758	[1,2,4,8589934592,1,0]
-val_224	hdfs://### HDFS PATH ###	174	[1,2,4,8589934592,1,0]
-val_224	hdfs://### HDFS PATH ###	2892	[1,2,4,8589934592,1,0]
-val_226	hdfs://### HDFS PATH ###	3048	[1,2,4,8589934592,1,0]
-val_228	hdfs://### HDFS PATH ###	3458	[1,2,4,8589934592,1,0]
-val_229	hdfs://### HDFS PATH ###	3202	[1,2,4,8589934592,1,0]
-val_229	hdfs://### HDFS PATH ###	3956	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	1730	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	1936	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	2260	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	3580	[1,2,4,8589934592,1,0]
-val_230	hdfs://### HDFS PATH ###	4914	[1,2,4,8589934592,1,0]
-val_233	hdfs://### HDFS PATH ###	3214	[1,2,4,8589934592,1,0]
-val_233	hdfs://### HDFS PATH ###	5140	[1,2,4,8589934592,1,0]
-val_235	hdfs://### HDFS PATH ###	4046	[1,2,4,8589934592,1,0]
-val_237	hdfs://### HDFS PATH ###	4722	[1,2,4,8589934592,1,0]
-val_237	hdfs://### HDFS PATH ###	574	[1,2,4,8589934592,1,0]
-val_238	hdfs://### HDFS PATH ###	0	[1,2,4,8589934592,1,0]
-val_238	hdfs://### HDFS PATH ###	2746	[1,2,4,8589934592,1,0]
-val_239	hdfs://### HDFS PATH ###	1496	[1,2,4,8589934592,1,0]
-val_239	hdfs://### HDFS PATH ###	3722	[1,2,4,8589934592,1,0]
-val_24	hdfs://### HDFS PATH ###	1972	[1,2,4,8589934592,1,0]
-val_24	hdfs://### HDFS PATH ###	4594	[1,2,4,8589934592,1,0]
-val_241	hdfs://### HDFS PATH ###	1662	[1,2,4,8589934592,1,0]
-val_242	hdfs://### HDFS PATH ###	2940	[1,2,4,8589934592,1,0]
-val_242	hdfs://### HDFS PATH ###	3012	[1,2,4,8589934592,1,0]
-val_244	hdfs://### HDFS PATH ###	3872	[1,2,4,8589934592,1,0]
-val_247	hdfs://### HDFS PATH ###	718	[1,2,4,8589934592,1,0]
-val_248	hdfs://### HDFS PATH ###	4758	[1,2,4,8589934592,1,0]
-val_249	hdfs://### HDFS PATH ###	5034	[1,2,4,8589934592,1,0]
-val_252	hdfs://### HDFS PATH ###	454	[1,2,4,8589934592,1,0]
-val_255	hdfs://### HDFS PATH ###	4616	[1,2,4,8589934592,1,0]
-val_255	hdfs://### HDFS PATH ###	68	[1,2,4,8589934592,1,0]
-val_256	hdfs://### HDFS PATH ###	3770	[1,2,4,8589934592,1,0]
-val_256	hdfs://### HDFS PATH ###	5272	[1,2,4,8589934592,1,0]
-val_257	hdfs://### HDFS PATH ###	4208	[1,2,4,8589934592,1,0]
-val_258	hdfs://### HDFS PATH ###	4292	[1,2,4,8589934592,1,0]
-val_26	hdfs://### HDFS PATH ###	2226	[1,2,4,8589934592,1,0]
-val_26	hdfs://### HDFS PATH ###	5284	[1,2,4,8589934592,1,0]
-val_260	hdfs://### HDFS PATH ###	1764	[1,2,4,8589934592,1,0]
-val_262	hdfs://### HDFS PATH ###	4326	[1,2,4,8589934592,1,0]
-val_263	hdfs://### HDFS PATH ###	3782	[1,2,4,8589934592,1,0]
-val_265	hdfs://### HDFS PATH ###	114	[1,2,4,8589934592,1,0]
-val_265	hdfs://### HDFS PATH ###	5046	[1,2,4,8589934592,1,0]
-val_266	hdfs://### HDFS PATH ###	814	[1,2,4,8589934592,1,0]
-val_27	hdfs://### HDFS PATH ###	34	[1,2,4,8589934592,1,0]
-val_272	hdfs://### HDFS PATH ###	1836	[1,2,4,8589934592,1,0]
-val_272	hdfs://### HDFS PATH ###	2976	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	162	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	2868	[1,2,4,8589934592,1,0]
-val_273	hdfs://### HDFS PATH ###	5524	[1,2,4,8589934592,1,0]
-val_274	hdfs://### HDFS PATH ###	3698	[1,2,4,8589934592,1,0]
-val_275	hdfs://### HDFS PATH ###	1638	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	1260	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	2856	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	362	[1,2,4,8589934592,1,0]
-val_277	hdfs://### HDFS PATH ###	4902	[1,2,4,8589934592,1,0]
-val_278	hdfs://### HDFS PATH ###	1544	[1,2,4,8589934592,1,0]
-val_278	hdfs://### HDFS PATH ###	80	[1,2,4,8589934592,1,0]
-val_28	hdfs://### HDFS PATH ###	5616	[1,2,4,8589934592,1,0]
-val_280	hdfs://### HDFS PATH ###	1226	[1,2,4,8589934592,1,0]
-val_280	hdfs://### HDFS PATH ###	3992	[1,2,4,8589934592,1,0]
-val_281	hdfs://### HDFS PATH ###	350	[1,2,4,8589934592,1,0]
-val_281	hdfs://### HDFS PATH ###	5548	[1,2,4,8589934592,1,0]
-val_282	hdfs://### HDFS PATH ###	2468	[1,2,4,8589934592,1,0]
-val_282	hdfs://### HDFS PATH ###	2722	[1,2,4,8589934592,1,0]
-val_283	hdfs://### HDFS PATH ###	4022	[1,2,4,8589934592,1,0]
-val_284	hdfs://### HDFS PATH ###	1708	[1,2,4,8589934592,1,0]
-val_285	hdfs://### HDFS PATH ###	5478	[1,2,4,8589934592,1,0]
-val_286	hdfs://### HDFS PATH ###	1404	[1,2,4,8589934592,1,0]
-val_287	hdfs://### HDFS PATH ###	490	[1,2,4,8589934592,1,0]
-val_288	hdfs://### HDFS PATH ###	2422	[1,2,4,8589934592,1,0]
-val_288	hdfs://### HDFS PATH ###	3840	[1,2,4,8589934592,1,0]
-val_289	hdfs://### HDFS PATH ###	1568	[1,2,4,8589934592,1,0]
-val_291	hdfs://### HDFS PATH ###	4582	[1,2,4,8589934592,1,0]
-val_292	hdfs://### HDFS PATH ###	466	[1,2,4,8589934592,1,0]
-val_296	hdfs://### HDFS PATH ###	3626	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	2168	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	4456	[1,2,4,8589934592,1,0]
-val_298	hdfs://### HDFS PATH ###	5386	[1,2,4,8589934592,1,0]
-val_30	hdfs://### HDFS PATH ###	3494	[1,2,4,8589934592,1,0]
-val_302	hdfs://### HDFS PATH ###	1034	[1,2,4,8589934592,1,0]
-val_305	hdfs://### HDFS PATH ###	4782	[1,2,4,8589934592,1,0]
-val_306	hdfs://### HDFS PATH ###	2880	[1,2,4,8589934592,1,0]
-val_307	hdfs://### HDFS PATH ###	2812	[1,2,4,8589934592,1,0]
-val_307	hdfs://### HDFS PATH ###	5672	[1,2,4,8589934592,1,0]
-val_308	hdfs://### HDFS PATH ###	2388	[1,2,4,8589934592,1,0]
-val_309	hdfs://### HDFS PATH ###	2904	[1,2,4,8589934592,1,0]
-val_309	hdfs://### HDFS PATH ###	790	[1,2,4,8589934592,1,0]
-val_310	hdfs://### HDFS PATH ###	4962	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	1000	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	1626	[1,2,4,8589934592,1,0]
-val_311	hdfs://### HDFS PATH ###	22	[1,2,4,8589934592,1,0]
-val_315	hdfs://### HDFS PATH ###	5594	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	1012	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	2576	[1,2,4,8589934592,1,0]
-val_316	hdfs://### HDFS PATH ###	3944	[1,2,4,8589934592,1,0]
-val_317	hdfs://### HDFS PATH ###	3104	[1,2,4,8589934592,1,0]
-val_317	hdfs://### HDFS PATH ###	4974	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	1602	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	2504	[1,2,4,8589934592,1,0]
-val_318	hdfs://### HDFS PATH ###	2516	[1,2,4,8589934592,1,0]
-val_321	hdfs://### HDFS PATH ###	3308	[1,2,4,8589934592,1,0]
-val_321	hdfs://### HDFS PATH ###	4090	[1,2,4,8589934592,1,0]
-val_322	hdfs://### HDFS PATH ###	2096	[1,2,4,8589934592,1,0]
-val_322	hdfs://### HDFS PATH ###	3250	[1,2,4,8589934592,1,0]
-val_323	hdfs://### HDFS PATH ###	4878	[1,2,4,8589934592,1,0]
-val_325	hdfs://### HDFS PATH ###	4890	[1,2,4,8589934592,1,0]
-val_325	hdfs://### HDFS PATH ###	862	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	2248	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	2928	[1,2,4,8589934592,1,0]
-val_327	hdfs://### HDFS PATH ###	338	[1,2,4,8589934592,1,0]
-val_33	hdfs://### HDFS PATH ###	3592	[1,2,4,8589934592,1,0]
-val_331	hdfs://### HDFS PATH ###	2988	[1,2,4,8589934592,1,0]
-val_331	hdfs://### HDFS PATH ###	4034	[1,2,4,8589934592,1,0]
-val_332	hdfs://### HDFS PATH ###	1614	[1,2,4,8589934592,1,0]
-val_333	hdfs://### HDFS PATH ###	1684	[1,2,4,8589934592,1,0]
-val_333	hdfs://### HDFS PATH ###	4986	[1,2,4,8589934592,1,0]
-val_335	hdfs://### HDFS PATH ###	4102	[1,2,4,8589934592,1,0]
-val_336	hdfs://### HDFS PATH ###	3148	[1,2,4,8589934592,1,0]
-val_338	hdfs://### HDFS PATH ###	526	[1,2,4,8589934592,1,0]
-val_339	hdfs://### HDFS PATH ###	956	[1,2,4,8589934592,1,0]
-val_34	hdfs://### HDFS PATH ###	3192	[1,2,4,8589934592,1,0]
-val_341	hdfs://### HDFS PATH ###	5406	[1,2,4,8589934592,1,0]
-val_342	hdfs://### HDFS PATH ###	3558	[1,2,4,8589934592,1,0]
-val_342	hdfs://### HDFS PATH ###	838	[1,2,4,8589934592,1,0]
-val_344	hdfs://### HDFS PATH ###	3674	[1,2,4,8589934592,1,0]
-val_344	hdfs://### HDFS PATH ###	5560	[1,2,4,8589934592,1,0]
-val_345	hdfs://### HDFS PATH ###	1082	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	1882	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	1960	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	4338	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	5490	[1,2,4,8589934592,1,0]
-val_348	hdfs://### HDFS PATH ###	5660	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	1238	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	3138	[1,2,4,8589934592,1,0]
-val_35	hdfs://### HDFS PATH ###	4012	[1,2,4,8589934592,1,0]
-val_351	hdfs://### HDFS PATH ###	4604	[1,2,4,8589934592,1,0]
-val_353	hdfs://### HDFS PATH ###	1812	[1,2,4,8589934592,1,0]
-val_353	hdfs://### HDFS PATH ###	5092	[1,2,4,8589934592,1,0]
-val_356	hdfs://### HDFS PATH ###	1284	[1,2,4,8589934592,1,0]
-val_360	hdfs://### HDFS PATH ###	4746	[1,2,4,8589934592,1,0]
-val_362	hdfs://### HDFS PATH ###	5454	[1,2,4,8589934592,1,0]
-val_364	hdfs://### HDFS PATH ###	2662	[1,2,4,8589934592,1,0]
-val_365	hdfs://### HDFS PATH ###	802	[1,2,4,8589934592,1,0]
-val_366	hdfs://### HDFS PATH ###	4138	[1,2,4,8589934592,1,0]
-val_367	hdfs://### HDFS PATH ###	3662	[1,2,4,8589934592,1,0]
-val_367	hdfs://### HDFS PATH ###	850	[1,2,4,8589934592,1,0]
-val_368	hdfs://### HDFS PATH ###	3602	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	186	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	2564	[1,2,4,8589934592,1,0]
-val_369	hdfs://### HDFS PATH ###	2952	[1,2,4,8589934592,1,0]
-val_37	hdfs://### HDFS PATH ###	328	[1,2,4,8589934592,1,0]
-val_37	hdfs://### HDFS PATH ###	5626	[1,2,4,8589934592,1,0]
-val_373	hdfs://### HDFS PATH ###	1824	[1,2,4,8589934592,1,0]
-val_374	hdfs://### HDFS PATH ###	268	[1,2,4,8589934592,1,0]
-val_375	hdfs://### HDFS PATH ###	5212	[1,2,4,8589934592,1,0]
-val_377	hdfs://### HDFS PATH ###	766	[1,2,4,8589934592,1,0]
-val_378	hdfs://### HDFS PATH ###	1152	[1,2,4,8589934592,1,0]
-val_379	hdfs://### HDFS PATH ###	5328	[1,2,4,8589934592,1,0]
-val_382	hdfs://### HDFS PATH ###	1320	[1,2,4,8589934592,1,0]
-val_382	hdfs://### HDFS PATH ###	4528	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	1788	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	5260	[1,2,4,8589934592,1,0]
-val_384	hdfs://### HDFS PATH ###	5316	[1,2,4,8589934592,1,0]
-val_386	hdfs://### HDFS PATH ###	1356	[1,2,4,8589934592,1,0]
-val_389	hdfs://### HDFS PATH ###	2916	[1,2,4,8589934592,1,0]
-val_392	hdfs://### HDFS PATH ###	2964	[1,2,4,8589934592,1,0]
-val_393	hdfs://### HDFS PATH ###	2132	[1,2,4,8589934592,1,0]
-val_394	hdfs://### HDFS PATH ###	562	[1,2,4,8589934592,1,0]
-val_395	hdfs://### HDFS PATH ###	2710	[1,2,4,8589934592,1,0]
-val_395	hdfs://### HDFS PATH ###	3116	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	3092	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	4372	[1,2,4,8589934592,1,0]
-val_396	hdfs://### HDFS PATH ###	706	[1,2,4,8589934592,1,0]
-val_397	hdfs://### HDFS PATH ###	4558	[1,2,4,8589934592,1,0]
-val_397	hdfs://### HDFS PATH ###	778	[1,2,4,8589934592,1,0]
-val_399	hdfs://### HDFS PATH ###	1296	[1,2,4,8589934592,1,0]
-val_399	hdfs://### HDFS PATH ###	694	[1,2,4,8589934592,1,0]
-val_4	hdfs://### HDFS PATH ###	1218	[1,2,4,8589934592,1,0]
-val_400	hdfs://### HDFS PATH ###	5778	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	138	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	3000	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	3828	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	4268	[1,2,4,8589934592,1,0]
-val_401	hdfs://### HDFS PATH ###	5224	[1,2,4,8589934592,1,0]
-val_402	hdfs://### HDFS PATH ###	3080	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	406	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	4162	[1,2,4,8589934592,1,0]
-val_403	hdfs://### HDFS PATH ###	5766	[1,2,4,8589934592,1,0]
-val_404	hdfs://### HDFS PATH ###	1776	[1,2,4,8589934592,1,0]
-val_404	hdfs://### HDFS PATH ###	2318	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	244	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	4220	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	4256	[1,2,4,8589934592,1,0]
-val_406	hdfs://### HDFS PATH ###	5152	[1,2,4,8589934592,1,0]
-val_407	hdfs://### HDFS PATH ###	5248	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	2528	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	4232	[1,2,4,8589934592,1,0]
-val_409	hdfs://### HDFS PATH ###	56	[1,2,4,8589934592,1,0]
-val_41	hdfs://### HDFS PATH ###	3388	[1,2,4,8589934592,1,0]
-val_411	hdfs://### HDFS PATH ###	1924	[1,2,4,8589934592,1,0]
-val_413	hdfs://### HDFS PATH ###	2600	[1,2,4,8589934592,1,0]
-val_413	hdfs://### HDFS PATH ###	610	[1,2,4,8589934592,1,0]
-val_414	hdfs://### HDFS PATH ###	4686	[1,2,4,8589934592,1,0]
-val_414	hdfs://### HDFS PATH ###	5696	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	430	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	4794	[1,2,4,8589934592,1,0]
-val_417	hdfs://### HDFS PATH ###	730	[1,2,4,8589934592,1,0]
-val_418	hdfs://### HDFS PATH ###	2204	[1,2,4,8589934592,1,0]
-val_419	hdfs://### HDFS PATH ###	2758	[1,2,4,8589934592,1,0]
-val_42	hdfs://### HDFS PATH ###	2030	[1,2,4,8589934592,1,0]
-val_42	hdfs://### HDFS PATH ###	3298	[1,2,4,8589934592,1,0]
-val_421	hdfs://### HDFS PATH ###	5236	[1,2,4,8589934592,1,0]
-val_424	hdfs://### HDFS PATH ###	4350	[1,2,4,8589934592,1,0]
-val_424	hdfs://### HDFS PATH ###	4504	[1,2,4,8589934592,1,0]
-val_427	hdfs://### HDFS PATH ###	1248	[1,2,4,8589934592,1,0]
-val_429	hdfs://### HDFS PATH ###	256	[1,2,4,8589934592,1,0]
-val_429	hdfs://### HDFS PATH ###	4842	[1,2,4,8589934592,1,0]
-val_43	hdfs://### HDFS PATH ###	2330	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	1532	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	3320	[1,2,4,8589934592,1,0]
-val_430	hdfs://### HDFS PATH ###	442	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	1994	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	4420	[1,2,4,8589934592,1,0]
-val_431	hdfs://### HDFS PATH ###	4480	[1,2,4,8589934592,1,0]
-val_432	hdfs://### HDFS PATH ###	3920	[1,2,4,8589934592,1,0]
-val_435	hdfs://### HDFS PATH ###	2834	[1,2,4,8589934592,1,0]
-val_436	hdfs://### HDFS PATH ###	2340	[1,2,4,8589934592,1,0]
-val_437	hdfs://### HDFS PATH ###	1368	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	1070	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	3884	[1,2,4,8589934592,1,0]
-val_438	hdfs://### HDFS PATH ###	4662	[1,2,4,8589934592,1,0]
-val_439	hdfs://### HDFS PATH ###	4734	[1,2,4,8589934592,1,0]
-val_439	hdfs://### HDFS PATH ###	826	[1,2,4,8589934592,1,0]
-val_44	hdfs://### HDFS PATH ###	4068	[1,2,4,8589934592,1,0]
-val_443	hdfs://### HDFS PATH ###	4866	[1,2,4,8589934592,1,0]
-val_444	hdfs://### HDFS PATH ###	4818	[1,2,4,8589934592,1,0]
-val_446	hdfs://### HDFS PATH ###	538	[1,2,4,8589934592,1,0]
-val_448	hdfs://### HDFS PATH ###	5636	[1,2,4,8589934592,1,0]
-val_449	hdfs://### HDFS PATH ###	3434	[1,2,4,8589934592,1,0]
-val_452	hdfs://### HDFS PATH ###	3024	[1,2,4,8589934592,1,0]
-val_453	hdfs://### HDFS PATH ###	3482	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	2144	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	4432	[1,2,4,8589934592,1,0]
-val_454	hdfs://### HDFS PATH ###	5200	[1,2,4,8589934592,1,0]
-val_455	hdfs://### HDFS PATH ###	976	[1,2,4,8589934592,1,0]
-val_457	hdfs://### HDFS PATH ###	2446	[1,2,4,8589934592,1,0]
-val_458	hdfs://### HDFS PATH ###	3356	[1,2,4,8589934592,1,0]
-val_458	hdfs://### HDFS PATH ###	5442	[1,2,4,8589934592,1,0]
-val_459	hdfs://### HDFS PATH ###	1450	[1,2,4,8589934592,1,0]
-val_459	hdfs://### HDFS PATH ###	550	[1,2,4,8589934592,1,0]
-val_460	hdfs://### HDFS PATH ###	5010	[1,2,4,8589934592,1,0]
-val_462	hdfs://### HDFS PATH ###	5128	[1,2,4,8589934592,1,0]
-val_462	hdfs://### HDFS PATH ###	5350	[1,2,4,8589934592,1,0]
-val_463	hdfs://### HDFS PATH ###	1982	[1,2,4,8589934592,1,0]
-val_463	hdfs://### HDFS PATH ###	3980	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	1894	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	4126	[1,2,4,8589934592,1,0]
-val_466	hdfs://### HDFS PATH ###	658	[1,2,4,8589934592,1,0]
-val_467	hdfs://### HDFS PATH ###	3908	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	2120	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	2376	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	3526	[1,2,4,8589934592,1,0]
-val_468	hdfs://### HDFS PATH ###	4950	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	1380	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	2364	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	292	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	3968	[1,2,4,8589934592,1,0]
-val_469	hdfs://### HDFS PATH ###	5582	[1,2,4,8589934592,1,0]
-val_47	hdfs://### HDFS PATH ###	1198	[1,2,4,8589934592,1,0]
-val_470	hdfs://### HDFS PATH ###	2540	[1,2,4,8589934592,1,0]
-val_472	hdfs://### HDFS PATH ###	3238	[1,2,4,8589934592,1,0]
-val_475	hdfs://### HDFS PATH ###	898	[1,2,4,8589934592,1,0]
-val_477	hdfs://### HDFS PATH ###	5708	[1,2,4,8589934592,1,0]
-val_478	hdfs://### HDFS PATH ###	4444	[1,2,4,8589934592,1,0]
-val_478	hdfs://### HDFS PATH ###	4926	[1,2,4,8589934592,1,0]
-val_479	hdfs://### HDFS PATH ###	4770	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	3816	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	4570	[1,2,4,8589934592,1,0]
-val_480	hdfs://### HDFS PATH ###	5058	[1,2,4,8589934592,1,0]
-val_481	hdfs://### HDFS PATH ###	2434	[1,2,4,8589934592,1,0]
-val_482	hdfs://### HDFS PATH ###	586	[1,2,4,8589934592,1,0]
-val_483	hdfs://### HDFS PATH ###	4174	[1,2,4,8589934592,1,0]
-val_484	hdfs://### HDFS PATH ###	102	[1,2,4,8589934592,1,0]
-val_485	hdfs://### HDFS PATH ###	3734	[1,2,4,8589934592,1,0]
-val_487	hdfs://### HDFS PATH ###	3804	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	1128	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	1800	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	3344	[1,2,4,8589934592,1,0]
-val_489	hdfs://### HDFS PATH ###	742	[1,2,4,8589934592,1,0]
-val_490	hdfs://### HDFS PATH ###	2640	[1,2,4,8589934592,1,0]
-val_491	hdfs://### HDFS PATH ###	4710	[1,2,4,8589934592,1,0]
-val_492	hdfs://### HDFS PATH ###	3410	[1,2,4,8589934592,1,0]
-val_492	hdfs://### HDFS PATH ###	5362	[1,2,4,8589934592,1,0]
-val_493	hdfs://### HDFS PATH ###	4998	[1,2,4,8589934592,1,0]
-val_494	hdfs://### HDFS PATH ###	622	[1,2,4,8589934592,1,0]
-val_495	hdfs://### HDFS PATH ###	316	[1,2,4,8589934592,1,0]
-val_496	hdfs://### HDFS PATH ###	2076	[1,2,4,8589934592,1,0]
-val_497	hdfs://### HDFS PATH ###	3068	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	1332	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	3262	[1,2,4,8589934592,1,0]
-val_498	hdfs://### HDFS PATH ###	5418	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	3060	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	3864	[1,2,4,8589934592,1,0]
-val_5	hdfs://### HDFS PATH ###	4540	[1,2,4,8589934592,1,0]
-val_51	hdfs://### HDFS PATH ###	1462	[1,2,4,8589934592,1,0]
-val_51	hdfs://### HDFS PATH ###	2308	[1,2,4,8589934592,1,0]
-val_53	hdfs://### HDFS PATH ###	4186	[1,2,4,8589934592,1,0]
-val_54	hdfs://### HDFS PATH ###	1440	[1,2,4,8589934592,1,0]
-val_57	hdfs://### HDFS PATH ###	1024	[1,2,4,8589934592,1,0]
-val_58	hdfs://### HDFS PATH ###	1906	[1,2,4,8589934592,1,0]
-val_58	hdfs://### HDFS PATH ###	3128	[1,2,4,8589934592,1,0]
-val_64	hdfs://### HDFS PATH ###	3516	[1,2,4,8589934592,1,0]
-val_65	hdfs://### HDFS PATH ###	1592	[1,2,4,8589934592,1,0]
-val_66	hdfs://### HDFS PATH ###	198	[1,2,4,8589934592,1,0]
-val_67	hdfs://### HDFS PATH ###	1754	[1,2,4,8589934592,1,0]
-val_67	hdfs://### HDFS PATH ###	5306	[1,2,4,8589934592,1,0]
-val_69	hdfs://### HDFS PATH ###	3570	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	3794	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	4548	[1,2,4,8589934592,1,0]
-val_70	hdfs://### HDFS PATH ###	4640	[1,2,4,8589934592,1,0]
-val_72	hdfs://### HDFS PATH ###	1208	[1,2,4,8589934592,1,0]
-val_72	hdfs://### HDFS PATH ###	2792	[1,2,4,8589934592,1,0]
-val_74	hdfs://### HDFS PATH ###	3548	[1,2,4,8589934592,1,0]
-val_76	hdfs://### HDFS PATH ###	3378	[1,2,4,8589934592,1,0]
-val_76	hdfs://### HDFS PATH ###	3538	[1,2,4,8589934592,1,0]
-val_77	hdfs://### HDFS PATH ###	2622	[1,2,4,8589934592,1,0]
-val_78	hdfs://### HDFS PATH ###	3368	[1,2,4,8589934592,1,0]
-val_8	hdfs://### HDFS PATH ###	1916	[1,2,4,8589934592,1,0]
-val_80	hdfs://### HDFS PATH ###	4058	[1,2,4,8589934592,1,0]
-val_82	hdfs://### HDFS PATH ###	396	[1,2,4,8589934592,1,0]
-val_83	hdfs://### HDFS PATH ###	1674	[1,2,4,8589934592,1,0]
-val_83	hdfs://### HDFS PATH ###	5070	[1,2,4,8589934592,1,0]
-val_84	hdfs://### HDFS PATH ###	1872	[1,2,4,8589934592,1,0]
-val_84	hdfs://### HDFS PATH ###	5606	[1,2,4,8589934592,1,0]
-val_85	hdfs://### HDFS PATH ###	2612	[1,2,4,8589934592,1,0]
-val_86	hdfs://### HDFS PATH ###	12	[1,2,4,8589934592,1,0]
-val_87	hdfs://### HDFS PATH ###	2652	[1,2,4,8589934592,1,0]
-val_9	hdfs://### HDFS PATH ###	5398	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	2802	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	4304	[1,2,4,8589934592,1,0]
-val_90	hdfs://### HDFS PATH ###	5744	[1,2,4,8589934592,1,0]
-val_92	hdfs://### HDFS PATH ###	1176	[1,2,4,8589934592,1,0]
-val_95	hdfs://### HDFS PATH ###	2400	[1,2,4,8589934592,1,0]
-val_95	hdfs://### HDFS PATH ###	3160	[1,2,4,8589934592,1,0]
-val_96	hdfs://### HDFS PATH ###	2216	[1,2,4,8589934592,1,0]
-val_97	hdfs://### HDFS PATH ###	5572	[1,2,4,8589934592,1,0]
-val_97	hdfs://### HDFS PATH ###	5802	[1,2,4,8589934592,1,0]
-val_98	hdfs://### HDFS PATH ###	2458	[1,2,4,8589934592,1,0]
-val_98	hdfs://### HDFS PATH ###	92	[1,2,4,8589934592,1,0]
-PREHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-       WHERE key = 0) a
- JOIN 
-    (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-       WHERE value = "val_0") b
- ON
-   a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-       WHERE key = 0) a
- JOIN 
-    (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-       WHERE value = "val_0") b
- ON
-   a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 4 (PARTITION-LEVEL SORT, 4)
-        Reducer 3 <- Reducer 2 (GROUP, 4)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: default__src_src1_index__
-                  Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: bigint)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: array<bigint>)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: default__src_src2_index__
-                  Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: bigint)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: array<bigint>)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: string), _col1 (type: bigint)
-                  1 _col0 (type: string), _col1 (type: bigint)
-                outputColumnNames: _col0, _col1, _col2, _col5
-                Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
-                  Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: bigint)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: collect_set(_col1)
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: array<bigint>)
-        Reducer 3 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: collect_set(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-     (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-        WHERE value = "val_0") b
-  ON
-    a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
-        WHERE key = 0) a
-  JOIN 
-     (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
-        WHERE value = "val_0") b
-  ON
-    a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	val_0
-0	val_0
-0	val_0
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0	val_0
-0	val_0
-0	val_0
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
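
For context, the golden file removed above exercised Hive's bitmap index feature, and it became unconnected because index support itself was dropped for Hive 3.0 (HIVE-18448). As a purely historical sketch of how the default__src_src1_index__ table consumed by the join above was built (Hive 2.x syntax; the handler class name is the one documented for that line, so treat it as an assumption here):

-- Hive 2.x only: CREATE INDEX no longer exists after HIVE-18448.
CREATE INDEX src1_index ON TABLE src(key)
AS 'org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler'
WITH DEFERRED REBUILD;
ALTER INDEX src1_index ON src REBUILD;
-- The rebuilt index table exposes the _bucketname, _offset and _bitmaps
-- columns that the EWAH_BITMAP_AND/EWAH_BITMAP_EMPTY join above consumes.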

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/udf_bitmap_and.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_bitmap_and.q.out b/ql/src/test/results/clientpositive/udf_bitmap_and.q.out
deleted file mode 100644
index 8c93398..0000000
--- a/ql/src/test/results/clientpositive/udf_bitmap_and.q.out
+++ /dev/null
@@ -1,68 +0,0 @@
-PREHOOK: query: select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-[13,2,4,8589934592,4096,0]
-PREHOOK: query: select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-[13,1,4,2,0]
-PREHOOK: query: drop table bitmap_test
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table bitmap_test
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table bitmap_test (a array<bigint>, b array<bigint>)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: create table bitmap_test (a array<bigint>, b array<bigint>)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bitmap_test
-PREHOOK: query: insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@bitmap_test
-POSTHOOK: Lineage: bitmap_test.a EXPRESSION []
-POSTHOOK: Lineage: bitmap_test.b EXPRESSION []
-PREHOOK: query: select ewah_bitmap_and(a,b) from bitmap_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@bitmap_test
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_and(a,b) from bitmap_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bitmap_test
-#### A masked pattern was here ####
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-[13,1,4,2,0]
-PREHOOK: query: drop table bitmap_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@bitmap_test
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: drop table bitmap_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@bitmap_test
-POSTHOOK: Output: default@bitmap_test

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/udf_bitmap_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_bitmap_empty.q.out b/ql/src/test/results/clientpositive/udf_bitmap_empty.q.out
deleted file mode 100644
index ca96e78..0000000
--- a/ql/src/test/results/clientpositive/udf_bitmap_empty.q.out
+++ /dev/null
@@ -1,18 +0,0 @@
-PREHOOK: query: select ewah_bitmap_empty(array(13,2,4,8589934592,0,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_empty(array(13,2,4,8589934592,0,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-true
-PREHOOK: query: select ewah_bitmap_empty(array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_empty(array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-false
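
Taken together, the two golden files above pin down enough behavior for a small self-checking query: the AND test shows that intersecting an EWAH bitmap with itself returns the input unchanged, and the emptiness test shows that this particular array is non-empty. A minimal sketch in the same style (the arrays are EWAH-encoded words copied from the tests, not raw bit positions):

SELECT NOT ewah_bitmap_empty(
         ewah_bitmap_and(array(13,2,4,8589934592,4096,0),
                         array(13,2,4,8589934592,4096,0)))
FROM src TABLESAMPLE (1 ROWS);
-- Expected: true. Per the outputs above, the self-AND returns its input
-- unchanged, and ewah_bitmap_empty of that array is false.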

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/udf_bitmap_or.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_bitmap_or.q.out b/ql/src/test/results/clientpositive/udf_bitmap_or.q.out
deleted file mode 100644
index 43521da..0000000
--- a/ql/src/test/results/clientpositive/udf_bitmap_or.q.out
+++ /dev/null
@@ -1,68 +0,0 @@
-PREHOOK: query: select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-[13,2,4,8589934592,4096,0]
-PREHOOK: query: select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-[13,2,4,8589934592,4224,0]
-PREHOOK: query: drop table bitmap_test
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table bitmap_test
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table bitmap_test (a array<bigint>, b array<bigint>)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: create table bitmap_test (a array<bigint>, b array<bigint>)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bitmap_test
-PREHOOK: query: insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@bitmap_test
-POSTHOOK: Lineage: bitmap_test.a EXPRESSION []
-POSTHOOK: Lineage: bitmap_test.b EXPRESSION []
-PREHOOK: query: select ewah_bitmap_or(a,b) from bitmap_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@bitmap_test
-#### A masked pattern was here ####
-POSTHOOK: query: select ewah_bitmap_or(a,b) from bitmap_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bitmap_test
-#### A masked pattern was here ####
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-[13,2,4,8589934592,4224,0]
-PREHOOK: query: drop table bitmap_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@bitmap_test
-PREHOOK: Output: default@bitmap_test
-POSTHOOK: query: drop table bitmap_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@bitmap_test
-POSTHOOK: Output: default@bitmap_test
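
One detail worth noting in the OR outputs above: for these particular inputs the result differs from the operands in a single word, and that word is the bitwise OR of the corresponding input words, 4224 = 4096 | 128. (EWAH is run-length encoded, so this will not hold element by element in general.) A one-line sanity check in the same style as the removed tests, using Hive's integer | operator:

SELECT 4096 | 128 FROM src TABLESAMPLE (1 ROWS);
-- returns 4224, matching the fifth element of the OR results above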

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/vector_string_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_string_decimal.q.out b/ql/src/test/results/clientpositive/vector_string_decimal.q.out
deleted file mode 100644
index d792c46..0000000
--- a/ql/src/test/results/clientpositive/vector_string_decimal.q.out
+++ /dev/null
@@ -1,137 +0,0 @@
-PREHOOK: query: drop table orc_decimal
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table orc_decimal
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table orc_decimal (id decimal(18,0)) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_decimal
-POSTHOOK: query: create table orc_decimal (id decimal(18,0)) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_decimal
-PREHOOK: query: create table staging (id decimal(18,0))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@staging
-POSTHOOK: query: create table staging (id decimal(18,0))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@staging
-PREHOOK: query: insert into staging values (34324.0), (100000000.0), (200000000.0), (300000000.0)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@staging
-POSTHOOK: query: insert into staging values (34324.0), (100000000.0), (200000000.0), (300000000.0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@staging
-POSTHOOK: Lineage: staging.id SCRIPT []
-PREHOOK: query: insert overwrite table orc_decimal select id from staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@staging
-PREHOOK: Output: default@orc_decimal
-POSTHOOK: query: insert overwrite table orc_decimal select id from staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@staging
-POSTHOOK: Output: default@orc_decimal
-POSTHOOK: Lineage: orc_decimal.id SIMPLE [(staging)staging.FieldSchema(name:id, type:decimal(18,0), comment:null), ]
-PREHOOK: query: explain vectorization expression
-select * from orc_decimal where id in ('100000000', '200000000')
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select * from orc_decimal where id in ('100000000', '200000000')
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: orc_decimal
-                  Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDoubleColumnInList(col 2:double, values [1.0E8, 2.0E8])(children: CastDecimalToDouble(col 0:decimal(18,0)) -> 2:double)
-                    predicate: (UDFToDouble(id)) IN (1.0E8, 2.0E8) (type: boolean)
-                    Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: id (type: decimal(18,0))
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0]
-                      Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from orc_decimal where id in ('100000000', '200000000')
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_decimal
-#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_decimal where id in ('100000000', '200000000')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_decimal
-#### A masked pattern was here ####
-100000000
-200000000
-PREHOOK: query: drop table orc_decimal
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orc_decimal
-PREHOOK: Output: default@orc_decimal
-POSTHOOK: query: drop table orc_decimal
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orc_decimal
-POSTHOOK: Output: default@orc_decimal
-PREHOOK: query: drop table staging
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@staging
-PREHOOK: Output: default@staging
-POSTHOOK: query: drop table staging
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@staging
-POSTHOOK: Output: default@staging
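
The plan in the removed file documents a subtle behavior: a decimal column compared against string literals in an IN list compiles to a double comparison (CastDecimalToDouble feeding FilterDoubleColumnInList), which vectorizes but can lose precision once values exceed what a double represents exactly. A hedged sketch of the safer phrasing, assuming Hive's usual common-type rules pick decimal when the literals are numeric:

-- Same rows, but no decimal-to-double cast in the predicate.
SELECT * FROM orc_decimal WHERE id IN (100000000, 200000000);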


http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_gather_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_gather_stats.q.out b/ql/src/test/results/clientpositive/llap/vector_gather_stats.q.out
deleted file mode 100644
index e777242..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_gather_stats.q.out
+++ /dev/null
@@ -1,108 +0,0 @@
-PREHOOK: query: create table cd
-(
-    cd_demo_sk                int,
-    cd_gender                 string,
-    cd_marital_status         string,
-    cd_purchase_estimate      int,
-    cd_credit_rating          string,
-    cd_dep_count              int,
-    cd_dep_employed_count     int,
-    cd_dep_college_count      int
-)
-partitioned by
-(
-    cd_education_status       string
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cd
-POSTHOOK: query: create table cd
-(
-    cd_demo_sk                int,
-    cd_gender                 string,
-    cd_marital_status         string,
-    cd_purchase_estimate      int,
-    cd_credit_rating          string,
-    cd_dep_count              int,
-    cd_dep_employed_count     int,
-    cd_dep_college_count      int
-)
-partitioned by
-(
-    cd_education_status       string
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cd
-PREHOOK: query: alter table cd add partition (cd_education_status='Primary')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@cd
-POSTHOOK: query: alter table cd add partition (cd_education_status='Primary')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@cd
-POSTHOOK: Output: default@cd@cd_education_status=Primary
-PREHOOK: query: insert into table cd partition (cd_education_status='Primary') values (1, 'M', 'M', 500, 'Good', 0, 0, 0)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@cd@cd_education_status=Primary
-POSTHOOK: query: insert into table cd partition (cd_education_status='Primary') values (1, 'M', 'M', 500, 'Good', 0, 0, 0)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@cd@cd_education_status=Primary
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_credit_rating SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_demo_sk SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_dep_college_count SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_dep_count SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_dep_employed_count SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_gender SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_marital_status SCRIPT []
-POSTHOOK: Lineage: cd PARTITION(cd_education_status=Primary).cd_purchase_estimate SCRIPT []
-PREHOOK: query: explain vectorization detail
-analyze table cd partition (cd_education_status) compute statistics
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-analyze table cd partition (cd_education_status) compute statistics
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: cd
-                  Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: TABLESCAN operator: gather stats not supported
-                vectorized: false
-
-  Stage: Stage-2
-    Stats Work
-      Basic Stats Work:
-
-PREHOOK: query: analyze table cd partition (cd_education_status) compute statistics
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cd
-PREHOOK: Input: default@cd@cd_education_status=Primary
-PREHOOK: Output: default@cd
-PREHOOK: Output: default@cd@cd_education_status=Primary
-POSTHOOK: query: analyze table cd partition (cd_education_status) compute statistics
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cd
-POSTHOOK: Input: default@cd@cd_education_status=Primary
-POSTHOOK: Output: default@cd
-POSTHOOK: Output: default@cd@cd_education_status=Primary
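
The notVectorizedReason line above is the whole point of this golden file: ANALYZE ... COMPUTE STATISTICS falls back to row mode because the stats-gathering TableScan is not vectorized. On versions that support EXPLAIN VECTORIZATION, the same check can be reproduced on any table; a minimal sketch:

EXPLAIN VECTORIZATION SUMMARY
ANALYZE TABLE cd PARTITION (cd_education_status) COMPUTE STATISTICS;
-- look for: notVectorizedReason: TABLESCAN operator: gather stats not supported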

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join.q.out b/ql/src/test/results/clientpositive/llap/vector_join.q.out
deleted file mode 100644
index 94c0290..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_join.q.out
+++ /dev/null
@@ -1,104 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS test1_vc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test1_vc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS test2_vc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test2_vc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE test1_vc
- (
-   id string)
-   PARTITIONED BY (
-  cr_year bigint,
-  cr_month bigint)
- ROW FORMAT SERDE
-  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-STORED AS INPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-OUTPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-TBLPROPERTIES (
-  'serialization.null.format'='' )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test1_vc
-POSTHOOK: query: CREATE TABLE test1_vc
- (
-   id string)
-   PARTITIONED BY (
-  cr_year bigint,
-  cr_month bigint)
- ROW FORMAT SERDE
-  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-STORED AS INPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-OUTPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-TBLPROPERTIES (
-  'serialization.null.format'='' )
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test1_vc
-PREHOOK: query: CREATE TABLE test2_vc(
-    id string
-  )
-   PARTITIONED BY (
-  cr_year bigint,
-  cr_month bigint)
-ROW FORMAT SERDE
-  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-STORED AS INPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-OUTPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-TBLPROPERTIES (
-  'serialization.null.format'=''
- )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test2_vc
-POSTHOOK: query: CREATE TABLE test2_vc(
-    id string
-  )
-   PARTITIONED BY (
-  cr_year bigint,
-  cr_month bigint)
-ROW FORMAT SERDE
-  'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-STORED AS INPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-OUTPUTFORMAT
-  'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-TBLPROPERTIES (
-  'serialization.null.format'=''
- )
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test2_vc
-PREHOOK: query: SELECT cr.id1 ,
-cr.id2
-FROM
-(SELECT t1.id id1,
- t2.id id2
- from
- (select * from test1_vc ) t1
- left outer join test2_vc  t2
- on t1.id=t2.id) cr
-PREHOOK: type: QUERY
-PREHOOK: Input: default@test1_vc
-PREHOOK: Input: default@test2_vc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cr.id1 ,
-cr.id2
-FROM
-(SELECT t1.id id1,
- t2.id id2
- from
- (select * from test1_vc ) t1
- left outer join test2_vc  t2
- on t1.id=t2.id) cr
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1_vc
-POSTHOOK: Input: default@test2_vc
-#### A masked pattern was here ####
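
For context, the deleted vector_join.q output reduces to a left outer join between two empty, RCFile-backed partitioned tables; with no data loaded, the query compiles and returns nothing. The core query, restated from the output above:

  SELECT cr.id1, cr.id2
  FROM (SELECT t1.id id1, t2.id id2
        FROM (SELECT * FROM test1_vc) t1
        LEFT OUTER JOIN test2_vc t2 ON t1.id = t2.id) cr;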

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out b/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
deleted file mode 100644
index 5e168a9..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
+++ /dev/null
@@ -1,175 +0,0 @@
-PREHOOK: query: drop table if exists char_part_tbl1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists char_part_tbl1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists char_part_tbl2
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists char_part_tbl2
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table studenttab(name string, age int, gpa double) clustered by (age) into 2 buckets stored as orc tblproperties('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@studenttab
-POSTHOOK: query: create table studenttab(name string, age int, gpa double) clustered by (age) into 2 buckets stored as orc tblproperties('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@studenttab
-PREHOOK: query: insert into table studenttab values ('calvin garcia',56,2.50), ('oscar miller',66,3.00), ('(yuri xylophone',30,2.74),('alice underhill',46,3.50)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@studenttab
-POSTHOOK: query: insert into table studenttab values ('calvin garcia',56,2.50), ('oscar miller',66,3.00), ('(yuri xylophone',30,2.74),('alice underhill',46,3.50)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@studenttab
-POSTHOOK: Lineage: studenttab.age SCRIPT []
-POSTHOOK: Lineage: studenttab.gpa SCRIPT []
-POSTHOOK: Lineage: studenttab.name SCRIPT []
-PREHOOK: query: create table char_tbl1(name string, age int) partitioned  by(gpa char(50)) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_tbl1
-POSTHOOK: query: create table char_tbl1(name string, age int) partitioned  by(gpa char(50)) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_tbl1
-PREHOOK: query: create table char_tbl2(name string, age int) partitioned by(gpa char(5)) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_tbl2
-POSTHOOK: query: create table char_tbl2(name string, age int) partitioned by(gpa char(5)) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_tbl2
-PREHOOK: query: insert into table char_tbl1 partition(gpa='3.5') select name, age from studenttab where gpa = 3.5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab
-PREHOOK: Output: default@char_tbl1@gpa=3.5                                               
-POSTHOOK: query: insert into table char_tbl1 partition(gpa='3.5') select name, age from studenttab where gpa = 3.5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab
-POSTHOOK: Output: default@char_tbl1@gpa=3.5                                               
-POSTHOOK: Lineage: char_tbl1 PARTITION(gpa=3.5                                               ).age SIMPLE [(studenttab)studenttab.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: char_tbl1 PARTITION(gpa=3.5                                               ).name SIMPLE [(studenttab)studenttab.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: insert into table char_tbl1 partition(gpa='2.5') select name, age from studenttab where gpa = 2.5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab
-PREHOOK: Output: default@char_tbl1@gpa=2.5                                               
-POSTHOOK: query: insert into table char_tbl1 partition(gpa='2.5') select name, age from studenttab where gpa = 2.5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab
-POSTHOOK: Output: default@char_tbl1@gpa=2.5                                               
-POSTHOOK: Lineage: char_tbl1 PARTITION(gpa=2.5                                               ).age SIMPLE [(studenttab)studenttab.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: char_tbl1 PARTITION(gpa=2.5                                               ).name SIMPLE [(studenttab)studenttab.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: insert into table char_tbl2 partition(gpa='3.5') select name, age from studenttab where gpa = 3.5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab
-PREHOOK: Output: default@char_tbl2@gpa=3.5  
-POSTHOOK: query: insert into table char_tbl2 partition(gpa='3.5') select name, age from studenttab where gpa = 3.5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab
-POSTHOOK: Output: default@char_tbl2@gpa=3.5  
-POSTHOOK: Lineage: char_tbl2 PARTITION(gpa=3.5  ).age SIMPLE [(studenttab)studenttab.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: char_tbl2 PARTITION(gpa=3.5  ).name SIMPLE [(studenttab)studenttab.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: insert into table char_tbl2 partition(gpa='3') select name, age from studenttab where gpa = 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab
-PREHOOK: Output: default@char_tbl2@gpa=3    
-POSTHOOK: query: insert into table char_tbl2 partition(gpa='3') select name, age from studenttab where gpa = 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab
-POSTHOOK: Output: default@char_tbl2@gpa=3    
-POSTHOOK: Lineage: char_tbl2 PARTITION(gpa=3    ).age SIMPLE [(studenttab)studenttab.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: char_tbl2 PARTITION(gpa=3    ).name SIMPLE [(studenttab)studenttab.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: show partitions char_tbl1
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@char_tbl1
-POSTHOOK: query: show partitions char_tbl1
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@char_tbl1
-gpa=2.5                                               
-gpa=3.5                                               
-PREHOOK: query: show partitions char_tbl2
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@char_tbl2
-POSTHOOK: query: show partitions char_tbl2
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@char_tbl2
-gpa=3    
-gpa=3.5  
-PREHOOK: query: explain vectorization select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2 llap
-      File Output Operator [FS_10]
-        Merge Join Operator [MERGEJOIN_21] (rows=2 width=429)
-          Conds:RS_23._col2=RS_28._col2(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-        <-Map 1 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_23]
-            PartitionCols:_col2
-            Select Operator [SEL_22] (rows=2 width=237)
-              Output:["_col0","_col1","_col2"]
-              TableScan [TS_0] (rows=2 width=237)
-                default@char_tbl1,c1,Tbl:COMPLETE,Col:COMPLETE,Output:["name","age"]
-          Dynamic Partitioning Event Operator [EVENT_26] (rows=1 width=237)
-            Group By Operator [GBY_25] (rows=1 width=237)
-              Output:["_col0"],keys:_col0
-              Select Operator [SEL_24] (rows=2 width=237)
-                Output:["_col0"]
-                 Please refer to the previous Select Operator [SEL_22]
-        <-Map 3 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_28]
-            PartitionCols:_col2
-            Select Operator [SEL_27] (rows=2 width=192)
-              Output:["_col0","_col1","_col2"]
-              TableScan [TS_3] (rows=2 width=192)
-                default@char_tbl2,c2,Tbl:COMPLETE,Col:COMPLETE,Output:["name","age"]
-
-PREHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_tbl1
-PREHOOK: Input: default@char_tbl1@gpa=2.5                                               
-PREHOOK: Input: default@char_tbl1@gpa=3.5                                               
-PREHOOK: Input: default@char_tbl2
-PREHOOK: Input: default@char_tbl2@gpa=3    
-PREHOOK: Input: default@char_tbl2@gpa=3.5  
-#### A masked pattern was here ####
-POSTHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_tbl1
-POSTHOOK: Input: default@char_tbl1@gpa=2.5                                               
-POSTHOOK: Input: default@char_tbl1@gpa=3.5                                               
-POSTHOOK: Input: default@char_tbl2
-POSTHOOK: Input: default@char_tbl2@gpa=3    
-POSTHOOK: Input: default@char_tbl2@gpa=3.5  
-#### A masked pattern was here ####
-alice underhill	46	3.5                                               	alice underhill	46	3.5  
-PREHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_tbl1
-PREHOOK: Input: default@char_tbl1@gpa=2.5                                               
-PREHOOK: Input: default@char_tbl1@gpa=3.5                                               
-PREHOOK: Input: default@char_tbl2
-PREHOOK: Input: default@char_tbl2@gpa=3    
-PREHOOK: Input: default@char_tbl2@gpa=3.5  
-#### A masked pattern was here ####
-POSTHOOK: query: select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_tbl1
-POSTHOOK: Input: default@char_tbl1@gpa=2.5                                               
-POSTHOOK: Input: default@char_tbl1@gpa=3.5                                               
-POSTHOOK: Input: default@char_tbl2
-POSTHOOK: Input: default@char_tbl2@gpa=3    
-POSTHOOK: Input: default@char_tbl2@gpa=3.5  
-#### A masked pattern was here ####
-alice underhill	46	3.5                                               	alice underhill	46	3.5  
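
The join above turns on CHAR(n) semantics: partition values are stored blank-padded to the declared length (hence the trailing spaces in the SHOW PARTITIONS listings), but CHAR comparison ignores trailing pad, so '3.5' stored as char(50) still equals '3.5' stored as char(5), and only the matching row survives. Restated from the output:

  create table char_tbl1(name string, age int) partitioned by (gpa char(50)) stored as orc;
  create table char_tbl2(name string, age int) partitioned by (gpa char(5)) stored as orc;
  -- both sides pad gpa to their declared lengths, yet the keys still compare equal
  select c1.name, c1.age, c1.gpa, c2.name, c2.age, c2.gpa
  from char_tbl1 c1 join char_tbl2 c2 on (c1.gpa = c2.gpa);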

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out b/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
deleted file mode 100644
index d02fa08..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
+++ /dev/null
@@ -1,51 +0,0 @@
-PREHOOK: query: explain vectorization SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint)
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (cint) IN (UDFToInteger(ctinyint), UDFToInteger(cbigint)) (type: boolean)
-                    Statistics: Num rows: 6144 Data size: 1546640 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                      Statistics: Num rows: 6144 Data size: 1546640 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 6144 Data size: 1546640 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: FILTER operator: Vectorizing IN expression only supported for constant values
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
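
The plan above documents a vectorizer limitation rather than a bug: IN is only vectorized when its list consists of constants, so a column-valued list ("Vectorizing IN expression only supported for constant values") pushes the whole map task back to row mode. A contrasting pair; the second query is an illustrative example not taken from the deleted file, using only constants and so avoiding the stated limitation:

  -- falls back to row mode: the IN list contains columns
  SELECT * FROM alltypesorc WHERE cint IN (ctinyint, cbigint);
  -- stays eligible for vectorization: the IN list is constant
  SELECT * FROM alltypesorc WHERE cint IN (0, 1, 2);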

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
deleted file mode 100644
index ee0e664..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
+++ /dev/null
@@ -1,274 +0,0 @@
-PREHOOK: query: CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@non_string_part
-POSTHOOK: query: CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@non_string_part
-PREHOOK: query: INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint fROM alltypesorc 
-WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@non_string_part
-POSTHOOK: query: INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint fROM alltypesorc 
-WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS non_string_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@non_string_part
-POSTHOOK: query: SHOW PARTITIONS non_string_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@non_string_part
-ctinyint=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: non_string_part
-                  Statistics: Num rows: 3073 Data size: 24584 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0:int, val 0)
-                    predicate: (cint > 0) (type: boolean)
-                    Statistics: Num rows: 1024 Data size: 8192 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 4]
-                      Statistics: Num rows: 1024 Data size: 8192 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1024 Data size: 8192 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-                        value expressions: _col1 (type: tinyint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: tinyint)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1]
-                Statistics: Num rows: 1024 Data size: 8192 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@non_string_part
-PREHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@non_string_part
-POSTHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-762	NULL
-762	NULL
-6981	NULL
-6981	NULL
-6981	NULL
-86028	NULL
-504142	NULL
-799471	NULL
-1248059	NULL
-1286921	NULL
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION  SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: non_string_part
-                  Statistics: Num rows: 3073 Data size: 313446 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterLongColGreaterLongScalar(col 0:int, val 0)
-                    predicate: (cint > 0) (type: boolean)
-                    Statistics: Num rows: 1024 Data size: 104448 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cint (type: int), cstring1 (type: string)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 1]
-                      Statistics: Num rows: 1024 Data size: 104448 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: string)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1024 Data size: 104448 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.1
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1]
-                Statistics: Num rows: 1024 Data size: 104448 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 10
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 10 Data size: 1020 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 10 Data size: 1020 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@non_string_part
-PREHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@non_string_part
-POSTHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-762	3WsVeqb28VWEEOLI8ail
-762	40ks5556SV
-6981	1FNNhmiFLGw425NA13g
-6981	o5mb0QP5Y48Qd4vdB0
-6981	sF2CRfgt2K
-86028	T2o8XRFAL0HC4ikDQnfoCymw
-504142	PlOxor04p5cvVl
-799471	2fu24
-1248059	Uhps6mMh3IfHB3j7yH62K
-1286921	ODLrXI8882q8LS8
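
For context, non_string_part covers dynamic partitioning on a non-string (tinyint) column: every selected row has a NULL ctinyint, so all rows land in the __HIVE_DEFAULT_PARTITION__ partition and the partition column reads back as NULL, while the scans, filters, and top-N sorts stay fully vectorized. Restated from the output:

  CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP)
    PARTITIONED BY (ctinyint tinyint) STORED AS ORC;

  INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint)
    SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint FROM alltypesorc
    WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble;

  SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;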

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_orc_string_reader_empty_dict.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orc_string_reader_empty_dict.q.out b/ql/src/test/results/clientpositive/llap/vector_orc_string_reader_empty_dict.q.out
deleted file mode 100644
index 4f00bed..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_orc_string_reader_empty_dict.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-PREHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@orcstr
-POSTHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orcstr
-PREHOOK: query: insert overwrite table orcstr select null from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@orcstr
-POSTHOOK: query: insert overwrite table orcstr select null from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@orcstr
-POSTHOOK: Lineage: orcstr.vcol EXPRESSION []
-PREHOOK: query: select vcol from orcstr limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-POSTHOOK: query: select vcol from orcstr limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-NULL
-PREHOOK: query: select vcol from orcstr limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-POSTHOOK: query: select vcol from orcstr limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-NULL
-PREHOOK: query: insert overwrite table orcstr select "" from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@orcstr
-POSTHOOK: query: insert overwrite table orcstr select "" from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@orcstr
-POSTHOOK: Lineage: orcstr.vcol EXPRESSION []
-PREHOOK: query: select vcol from orcstr limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-POSTHOOK: query: select vcol from orcstr limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-
-PREHOOK: query: select vcol from orcstr limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-POSTHOOK: query: select vcol from orcstr limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcstr
-#### A masked pattern was here ####
-
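
The orcstr sequence above targets the ORC string reader when a column's dictionary is empty or degenerate: a varchar column written as all NULLs has no dictionary entries at all, and one written as all empty strings has a single zero-length entry; the reader must return NULL and '' respectively instead of failing. Restated:

  create table orcstr (vcol varchar(20)) stored as orc;
  insert overwrite table orcstr select null from src;  -- no dictionary entries
  select vcol from orcstr limit 1;                     -- NULL
  insert overwrite table orcstr select "" from src;    -- one zero-length entry
  select vcol from orcstr limit 1;                     -- empty string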

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_outer_join_no_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join_no_keys.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join_no_keys.q.out
deleted file mode 100644
index 8b37aa6..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join_no_keys.q.out
+++ /dev/null
@@ -1,408 +0,0 @@
-PREHOOK: query: create temporary table foo(x int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@foo
-POSTHOOK: query: create temporary table foo(x int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@foo
-PREHOOK: query: insert into foo values(1),(2)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@foo
-POSTHOOK: query: insert into foo values(1),(2)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@foo
-POSTHOOK: Lineage: foo.x SCRIPT []
-PREHOOK: query: create temporary table bar(y int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@bar
-POSTHOOK: query: create temporary table bar(y int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bar
-Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 2' is a cross product
-PREHOOK: query: explain vectorization detail
-select count(*) from bar right outer join foo
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select count(*) from bar right outer join foo
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Map 2 <- Map 1 (BROADCAST_EDGE)
-        Reducer 3 <- Map 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: bar
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:y:int, 1:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: []
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkEmptyKeyOperator
-                          keyColumnNums: []
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: []
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: []
-                    dataColumns: y:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  alias: foo
-                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:x:int, 1:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: []
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    Map Join Operator
-                      condition map:
-                           Right Outer Join 0 to 1
-                      filter predicates:
-                        0 
-                        1 {true}
-                      keys:
-                        0 
-                        1 
-                      Map Join Vectorization:
-                          className: VectorMapJoinOuterFilteredOperator
-                          native: false
-                          nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
-                          nativeConditionsNotMet: Outer Join has keys IS false
-                      input vertices:
-                        0 Map 1
-                      Statistics: Num rows: 2 Data size: 10 Basic stats: PARTIAL Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        Group By Vectorization:
-                            aggregators: VectorUDAFCountStar(*) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0]
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkEmptyKeyOperator
-                              keyColumnNums: []
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              valueColumnNums: [0]
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: []
-                    dataColumns: x:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: 
-                reduceColumnSortOrder: 
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: VALUE._col0:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 2' is a cross product
-PREHOOK: query: -- = 2
-
-select count(*) from bar right outer join foo
-PREHOOK: type: QUERY
-PREHOOK: Input: default@bar
-PREHOOK: Input: default@foo
-#### A masked pattern was here ####
-POSTHOOK: query: -- = 2
-
-select count(*) from bar right outer join foo
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bar
-POSTHOOK: Input: default@foo
-#### A masked pattern was here ####
-2
-Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- = 2
-
-explain vectorization detail
-select count(*) from bar, foo
-PREHOOK: type: QUERY
-POSTHOOK: query: -- = 2
-
-explain vectorization detail
-select count(*) from bar, foo
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (XPROD_EDGE), Map 4 (XPROD_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: bar
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:y:int, 1:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: []
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkEmptyKeyOperator
-                          keyColumnNums: []
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: []
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: []
-                    dataColumns: y:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: foo
-                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:x:int, 1:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: []
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      sort order: 
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkEmptyKeyOperator
-                          keyColumnNums: []
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: []
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    includeColumns: []
-                    dataColumns: x:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                Statistics: Num rows: 2 Data size: 10 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: 
-                reduceColumnSortOrder: 
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: VALUE._col0:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- = 0 
-
-select count(*) from bar, foo
-PREHOOK: type: QUERY
-PREHOOK: Input: default@bar
-PREHOOK: Input: default@foo
-#### A masked pattern was here ####
-POSTHOOK: query: -- = 0 
-
-select count(*) from bar, foo
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bar
-POSTHOOK: Input: default@foo
-#### A masked pattern was here ####
-0
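
The pair of results above contrasts two keyless joins against an empty left side: the right outer join preserves both rows of foo (count = 2), while the inner cross join yields 0 because one side is empty; the plan also records that the native vectorized map join is skipped ("Outer Join has keys IS false"), falling back to the non-native VectorMapJoinOuterFilteredOperator. Restated from the output:

  create temporary table foo(x int) stored as orc;
  insert into foo values (1), (2);
  create temporary table bar(y int) stored as orc;  -- deliberately left empty

  select count(*) from bar right outer join foo;    -- 2: every foo row survives
  select count(*) from bar, foo;                    -- 0: cross join with an empty side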

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
deleted file mode 100644
index 4429e9a..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
+++ /dev/null
@@ -1,400 +0,0 @@
-PREHOOK: query: explain vectorization detail
-select 'key1', 'value1' from alltypesorc tablesample (1 rows)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select 'key1', 'value1' from alltypesorc tablesample (1 rows)
-POSTHOOK: type: QUERY
-Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Row Limit Per Split: 1
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: 'key1' (type: string), 'value1' (type: string)
-                    outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [13, 14]
-                        selectExpressions: ConstantVectorExpression(val key1) -> 13:string, ConstantVectorExpression(val value1) -> 14:string
-                    Statistics: Num rows: 12288 Data size: 2187264 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 12288 Data size: 2187264 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: []
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [string, string]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select 'key1', 'value1' from alltypesorc tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select 'key1', 'value1' from alltypesorc tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-_c0	_c1
-key1	value1
-PREHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@decimal_2
-POSTHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@decimal_2
-PREHOOK: query: explain vectorization detail
-insert overwrite table decimal_2
-  select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-insert overwrite table decimal_2
-  select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows)
-POSTHOOK: type: QUERY
-Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Row Limit Per Split: 1
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: 17.29 (type: decimal(18,9))
-                    outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [13]
-                        selectExpressions: ConstantVectorExpression(val 17.29) -> 13:decimal(18,9)
-                    Statistics: Num rows: 12288 Data size: 1376256 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 12288 Data size: 1376256 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                          name: default.decimal_2
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: []
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(18,9)]
-
-  Stage: Stage-2
-    Dependency Collection
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.decimal_2
-
-  Stage: Stage-3
-    Stats Work
-      Basic Stats Work:
-
-PREHOOK: query: insert overwrite table decimal_2
-  select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@decimal_2
-POSTHOOK: query: insert overwrite table decimal_2
-  select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@decimal_2
-POSTHOOK: Lineage: decimal_2.t EXPRESSION []
-_col0
-PREHOOK: query: select count(*) from decimal_2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_2
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from decimal_2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_2
-#### A masked pattern was here ####
-_c0
-1
-PREHOOK: query: drop table decimal_2
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_2
-PREHOOK: Output: default@decimal_2
-POSTHOOK: query: drop table decimal_2
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_2
-POSTHOOK: Output: default@decimal_2
-PREHOOK: query: explain vectorization detail
-select count(1) from (select * from (Select 1 a) x order by x.a) y
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select count(1) from (select * from (Select 1 a) x order by x.a) y
-POSTHOOK: type: QUERY
-Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: _dummy_table
-                  Row Limit Per Split: 1
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-#### A masked pattern was here ####
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: 
-                reduceColumnSortOrder: 
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: VALUE._col0:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(1) from (select * from (Select 1 a) x order by x.a) y
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from (select * from (Select 1 a) x order by x.a) y
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-#### A masked pattern was here ####
-_c0
-1
-PREHOOK: query: explain vectorization detail
-create temporary table dual as select 1
-PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain vectorization detail
-create temporary table dual as select 1
-POSTHOOK: type: CREATETABLE_AS_SELECT
-Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-4 depends on stages: Stage-2, Stage-0
-  Stage-3 depends on stages: Stage-4
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: _dummy_table
-                  Row Limit Per Split: 1
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          name: default.dual
-            Execution mode: llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: false
-#### A masked pattern was here ####
-
-  Stage: Stage-2
-    Dependency Collection
-
-  Stage: Stage-4
-      Create Table Operator:
-        Create Table
-          columns: _c0 int
-          input format: org.apache.hadoop.mapred.TextInputFormat
-#### A masked pattern was here ####
-          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.dual
-          isTemporary: true
-
-  Stage: Stage-3
-    Stats Work
-      Basic Stats Work:
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: create temporary table dual as select 1
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dual
-POSTHOOK: query: create temporary table dual as select 1
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dual
-_c0
-PREHOOK: query: select * from dual
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dual
-#### A masked pattern was here ####
-POSTHOOK: query: select * from dual
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dual
-#### A masked pattern was here ####
-dual._c0
-1
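
The deleted golden file above exercises Hive's row-count table sampling: the "Row Limit Per Split: 1" entry in the TableScan shows that "tablesample (1 rows)" is implemented as a per-split row limit rather than a full scan followed by a filter. A minimal usage sketch in the same spirit (hypothetical row count, same table and syntax as in the file):

    -- Caps each input split at 2 rows instead of scanning the whole table.
    select ctinyint, cstring1 from alltypesorc tablesample (2 rows);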


[8/9] hive git commit: HIVE-18805: Add ConstantPropagate before stats annotation (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
HIVE-18805: Add ConstantPropagate before stats annotation (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a926179f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a926179f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a926179f

Branch: refs/heads/master
Commit: a926179f3d17223886a77e6c9733199e5f8b2b58
Parents: 99380fb
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Tue Feb 27 07:48:02 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Feb 27 07:50:36 2018 +0100

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/TezCompiler.java       | 19 ++++++--
 .../llap/dynamic_semijoin_reduction_2.q.out     |  2 +-
 .../clientpositive/llap/explainuser_1.q.out     |  4 +-
 .../clientpositive/llap/join32_lessSize.q.out   |  2 +-
 .../results/clientpositive/llap/join46.q.out    |  8 ----
 .../llap/limit_join_transpose.q.out             | 48 --------------------
 .../clientpositive/llap/llap_partitioned.q.out  |  2 +-
 .../results/clientpositive/llap/mapjoin46.q.out |  4 --
 .../clientpositive/llap/semijoin_hint.q.out     | 16 +++----
 .../llap/subquery_in_having.q.out               |  8 ++--
 .../clientpositive/llap/subquery_multi.q.out    |  3 --
 .../clientpositive/llap/subquery_notin.q.out    |  3 --
 .../clientpositive/llap/subquery_scalar.q.out   | 16 +++----
 .../clientpositive/llap/subquery_select.q.out   |  4 --
 .../clientpositive/llap/tez_join_tests.q.out    |  4 --
 .../clientpositive/llap/tez_joins_explain.q.out |  4 --
 .../vector_reduce_groupby_duplicate_cols.q.out  |  2 +-
 .../clientpositive/perf/tez/query72.q.out       | 10 ++--
 .../clientpositive/perf/tez/query78.q.out       |  8 ++--
 19 files changed, 48 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index f9a6386..dfd7908 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -145,6 +145,11 @@ public class TezCompiler extends TaskCompiler {
     runDynamicPartitionPruning(procCtx, inputs, outputs);
     perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup dynamic partition pruning");
 
+    // need to run this; to get consistent filterop conditions(for operator tree matching)
+    if (procCtx.conf.getBoolVar(ConfVars.HIVEOPTCONSTANTPROPAGATION)) {
+      new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext);
+    }
+
     perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // setup stats in the operator plan
     runStatsAnnotation(procCtx);
@@ -266,7 +271,9 @@ public class TezCompiler extends TaskCompiler {
 
         SemiJoinBranchInfo sjInfo =
                 context.parseContext.getRsToSemiJoinBranchInfo().get(o);
-        if (sjInfo == null ) continue;
+        if (sjInfo == null ) {
+          continue;
+        }
         if (sjInfo.getIsHint()) {
           // Skipping because of hint. Mark this info,
           hasHint = true;
@@ -866,7 +873,9 @@ public class TezCompiler extends TaskCompiler {
 
         ReduceSinkOperator rs = ((ReduceSinkOperator) child);
         SemiJoinBranchInfo sjInfo = pCtx.getRsToSemiJoinBranchInfo().get(rs);
-        if (sjInfo == null) continue;
+        if (sjInfo == null) {
+          continue;
+        }
 
         TableScanOperator ts = sjInfo.getTsOp();
         // This is a semijoin branch. Find if this is creating a potential
@@ -925,7 +934,9 @@ public class TezCompiler extends TaskCompiler {
         GenericUDAFBloomFilterEvaluator udafBloomFilterEvaluator =
                 (GenericUDAFBloomFilterEvaluator) agg.getGenericUDAFEvaluator();
         if (udafBloomFilterEvaluator.hasHintEntries())
+         {
           return null; // Created using hint, skip it
+        }
 
         long expectedEntries = udafBloomFilterEvaluator.getExpectedEntries();
         if (expectedEntries == -1 || expectedEntries >
@@ -1052,7 +1063,9 @@ public class TezCompiler extends TaskCompiler {
 
           ReduceSinkOperator rs = (ReduceSinkOperator) child;
           SemiJoinBranchInfo sjInfo = parseContext.getRsToSemiJoinBranchInfo().get(rs);
-          if (sjInfo == null) continue;
+          if (sjInfo == null) {
+            continue;
+          }
 
           TableScanOperator ts = sjInfo.getTsOp();
           if (ts != bigTableTS) {
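
The TezCompiler hunk above is the substance of HIVE-18805: a ConstantPropagate pass in SHORTCUT mode now runs after dynamic partition pruning and before runStatsAnnotation(), so FilterOperator predicates are already constant-folded when later phases compare operator trees (the remaining hunks are brace cleanups). A minimal sketch of the kind of query this affects, against the standard q-test src table (illustrative, not taken from the patch):

    -- Both branches filter on key > '2'. With constants folded before stats
    -- annotation, the two Filter Operators carry identical predicates, so the
    -- planner can collapse them into one shared filter; that is the effect
    -- visible in the explainuser_1.q.out hunk below ("Please refer to the
    -- previous Filter Operator [FIL_28]").
    explain
    select count(*)
    from (select key from src where key > '2') a
    join (select key from src where key > '2') b
      on a.key = b.key;

The golden-file deltas that follow are consequences of the new pass ordering: duplicated Select and Filter Operators disappear, Data size estimates shift where folded expressions reach the stats annotator, and MAPJOIN/MERGEJOIN operator ids are renumbered.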

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
index cab45c9..8bce445 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
@@ -632,7 +632,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col1 (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1)
                         mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 43ca0d9..45b2b4f 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -2256,9 +2256,7 @@ Stage-0
                           PartitionCols:_col0
                           Group By Operator [GBY_12] (rows=83 width=87)
                             Output:["_col0"],keys:key
-                            Filter Operator [FIL_29] (rows=166 width=87)
-                              predicate:(key > '2')
-                               Please refer to the previous TableScan [TS_2]
+                             Please refer to the previous Filter Operator [FIL_28]
 
 PREHOOK: query: explain select p_mfgr, b.p_name, p_size 
 from part b 

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
index 5b5be13..f8622da 100644
--- a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
@@ -1947,7 +1947,7 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col1 (type: string)
                 outputColumnNames: _col0, _col3, _col4
-                Position of Big Table: 1
+                Position of Big Table: 0
                 Statistics: Num rows: 64 Data size: 17024 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/join46.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join46.q.out b/ql/src/test/results/clientpositive/llap/join46.q.out
index 4625b9f..ecb34d1 100644
--- a/ql/src/test/results/clientpositive/llap/join46.q.out
+++ b/ql/src/test/results/clientpositive/llap/join46.q.out
@@ -2086,10 +2086,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: int)
                       Statistics: Num rows: 6 Data size: 572 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: int), _col2 (type: string)
-                  Select Operator
-                    expressions: key (type: int), value (type: int), col_1 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 6 Data size: 572 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: int)
                       sort order: +
@@ -2113,10 +2109,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: int)
                       Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: int), _col2 (type: string)
-                  Select Operator
-                    expressions: key (type: int), value (type: int), col_2 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: int)
                       sort order: +

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
index 4dd7432..4db4f12 100644
--- a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
@@ -242,10 +242,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
@@ -360,10 +356,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 1
                       Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
@@ -372,10 +364,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
@@ -518,10 +506,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 1
                       Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
@@ -530,10 +514,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
@@ -693,10 +673,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
@@ -1133,10 +1109,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
@@ -1252,10 +1224,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 1
                       Offset of rows: 1
@@ -1265,10 +1233,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
@@ -1412,10 +1376,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 1
                       Offset of rows: 1
@@ -1425,10 +1385,6 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +
@@ -1591,10 +1547,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
index ac75467..5397839 100644
--- a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
@@ -1710,7 +1710,7 @@ STAGE PLANS:
                         native: true
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         valueColumnNums: []
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 10 Data size: 2640 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
                     outputColumnNames: _col0

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/mapjoin46.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin46.q.out b/ql/src/test/results/clientpositive/llap/mapjoin46.q.out
index f530c1e..c6c34be 100644
--- a/ql/src/test/results/clientpositive/llap/mapjoin46.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapjoin46.q.out
@@ -1901,10 +1901,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: int)
                       Statistics: Num rows: 6 Data size: 572 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: int), _col2 (type: string)
-                  Select Operator
-                    expressions: key (type: int), value (type: int), col_1 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 6 Data size: 572 Basic stats: COMPLETE Column stats: COMPLETE
                     Map Join Operator
                       condition map:
                            Left Outer Join 0 to 1

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
index cdfa733..e666c50 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
@@ -487,7 +487,7 @@ STAGE PLANS:
                 Select Operator
                   expressions: str (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 4056 Data size: 352872 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4056 Data size: 705744 Basic stats: COMPLETE Column stats: COMPLETE
                   Group By Operator
                     aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                     mode: hash
@@ -592,7 +592,7 @@ STAGE PLANS:
                       Select Operator
                         expressions: cstring (type: string)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 9174 Data size: 1287800 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
                           aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=3000)
                           mode: hash
@@ -741,7 +741,7 @@ STAGE PLANS:
                       Select Operator
                         expressions: str (type: string)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 2000 Data size: 348000 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
                           aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                           mode: hash
@@ -1485,7 +1485,7 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col1 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 4056 Data size: 352872 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4056 Data size: 705744 Basic stats: COMPLETE Column stats: COMPLETE
                   Group By Operator
                     aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                     mode: hash
@@ -1962,7 +1962,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: str (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 348000 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                         mode: hash
@@ -2381,7 +2381,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: str (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 348000 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                         mode: hash
@@ -2562,7 +2562,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: cstring (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 9174 Data size: 1287800 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=3000)
                         mode: hash
@@ -2665,7 +2665,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: str (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 348000 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                         mode: hash
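
A quick arithmetic check on the semijoin_hint deltas above: every changed Data size doubles while the row counts stay fixed, so it is the estimated per-row width that doubles (4056 rows at 87 bytes = 352,872 before versus 4056 at 174 = 705,744 after; 2000 at 87 = 174,000 versus 2000 at 174 = 348,000). Once the constant-folded expression reaches the stats annotator, the projected string column appears to be costed at twice the previous average width; the diff does not say which estimate is closer to the truth.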

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out
index cc0b6a5..1c6c120 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out
@@ -1567,8 +1567,8 @@ POSTHOOK: Lineage: src_null.key SCRIPT []
 POSTHOOK: Lineage: src_null.value EXPRESSION []
 Warning: Map Join MAPJOIN[123][bigTable=?] in task 'Map 1' is a cross product
 Warning: Map Join MAPJOIN[124][bigTable=?] in task 'Map 1' is a cross product
-Warning: Map Join MAPJOIN[126][bigTable=?] in task 'Map 1' is a cross product
-Warning: Shuffle Join MERGEJOIN[125][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
+Warning: Map Join MAPJOIN[125][bigTable=?] in task 'Map 1' is a cross product
+Warning: Shuffle Join MERGEJOIN[126][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[127][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 6' is a cross product
 PREHOOK: query: explain
 select key, value, count(*)
@@ -2043,8 +2043,8 @@ STAGE PLANS:
 
 Warning: Map Join MAPJOIN[123][bigTable=?] in task 'Map 1' is a cross product
 Warning: Map Join MAPJOIN[124][bigTable=?] in task 'Map 1' is a cross product
-Warning: Map Join MAPJOIN[126][bigTable=?] in task 'Map 1' is a cross product
-Warning: Shuffle Join MERGEJOIN[125][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
+Warning: Map Join MAPJOIN[125][bigTable=?] in task 'Map 1' is a cross product
+Warning: Shuffle Join MERGEJOIN[126][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[127][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 6' is a cross product
 PREHOOK: query: select key, value, count(*)
 from src_null b

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/subquery_multi.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out
index 67a22c8..de543c9 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out
@@ -3989,9 +3989,6 @@ STAGE PLANS:
                           sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
-                  Filter Operator
-                    predicate: (key > '9') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       keys: key (type: string)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
index 50c18c8..247d8f3 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
@@ -62,9 +62,6 @@ STAGE PLANS:
                         sort order: 
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
-                  Filter Operator
-                    predicate: (key > '2') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       keys: key (type: string)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
index cec3daa..6a2c635 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
@@ -1363,9 +1363,9 @@ STAGE PLANS:
                   1 _col2 (type: string)
                 outputColumnNames: _col1, _col2, _col3
                 residual filter predicates: {((_col1 + 100) < CASE WHEN (_col3 is null) THEN (null) ELSE (_col2) END)}
-                Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -3167,7 +3167,7 @@ STAGE PLANS:
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col4
                 residual filter predicates: {(_col1 like CASE WHEN (_col4 is null) THEN (null) ELSE (_col3) END)}
-                Statistics: Num rows: 8 Data size: 2504 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 8 Data size: 2472 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int)
                   outputColumnNames: _col0
@@ -3825,7 +3825,7 @@ STAGE PLANS:
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col5, _col6
                 residual filter predicates: {(_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END)}
-                Statistics: Num rows: 7 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int), _col3 (type: int)
                   outputColumnNames: _col0, _col1
@@ -4034,7 +4034,7 @@ STAGE PLANS:
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col5, _col6
                 residual filter predicates: {(_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END)}
-                Statistics: Num rows: 7 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 7 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int), _col3 (type: int)
                   outputColumnNames: _col0, _col1
@@ -4378,7 +4378,7 @@ STAGE PLANS:
                   1 _col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 residual filter predicates: {(not (_col1 like CASE WHEN (_col4 is null) THEN (null) ELSE (_col3) END))}
-                Statistics: Num rows: 7 Data size: 3535 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 7 Data size: 3507 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1
@@ -6270,10 +6270,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: int)
                       Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: int)
-                  Select Operator
-                    expressions: i (type: int), j (type: int)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
                       Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
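
Similarly, in the subquery_scalar hunks above, each estimate after the residual CASE WHEN predicate drops by exactly 4 bytes per row (4 rows: 48 to 32; 8 rows: 2504 to 2472; 7 rows: 140 to 112; 7 rows: 3535 to 3507). That is consistent with one fixed-width column falling out of the costing once the predicate is folded, though which column that is cannot be read off the diff.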

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/subquery_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_select.q.out b/ql/src/test/results/clientpositive/llap/subquery_select.q.out
index 89eab37..a399d3e 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_select.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_select.q.out
@@ -4159,10 +4159,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: int)
                       Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: int)
-                  Select Operator
-                    expressions: p_partkey (type: int), p_size (type: int)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
                       Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out b/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
index 75879fb..dd429b6 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
@@ -33,10 +33,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out b/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
index 3790b37..1404e15 100644
--- a/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
@@ -33,10 +33,6 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: string)
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: string)
                       sort order: +

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
index 07c1e41..d19debe 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
                     key expressions: 1 (type: int), 2 (type: int)
                     sort order: ++
                     Map-reduce partition columns: 1 (type: int), 2 (type: int)
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: no inputs
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/perf/tez/query72.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query72.q.out b/ql/src/test/results/clientpositive/perf/tez/query72.q.out
index 55ddd5a..acab54f 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query72.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query72.q.out
@@ -164,7 +164,7 @@ Stage-0
                                               <-Reducer 13 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_38]
                                                   PartitionCols:_col4
-                                                  Merge Join Operator [MERGEJOIN_136] (rows=421645953 width=135)
+                                                  Merge Join Operator [MERGEJOIN_135] (rows=421645953 width=135)
                                                     Conds:RS_35._col5=RS_36._col0(Left Outer),Output:["_col1","_col4","_col6","_col7","_col9","_col10","_col16"]
                                                   <-Map 19 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_36]
@@ -176,7 +176,7 @@ Stage-0
                                                   <-Reducer 12 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_35]
                                                       PartitionCols:_col5
-                                                      Merge Join Operator [MERGEJOIN_135] (rows=383314495 width=135)
+                                                      Merge Join Operator [MERGEJOIN_134] (rows=383314495 width=135)
                                                         Conds:RS_32._col3=RS_33._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                       <-Map 18 [SIMPLE_EDGE]
                                                         SHUFFLE [RS_33]
@@ -190,7 +190,7 @@ Stage-0
                                                       <-Reducer 11 [SIMPLE_EDGE]
                                                         SHUFFLE [RS_32]
                                                           PartitionCols:_col3
-                                                          Merge Join Operator [MERGEJOIN_134] (rows=348467716 width=135)
+                                                          Merge Join Operator [MERGEJOIN_133] (rows=348467716 width=135)
                                                             Conds:RS_29._col2=RS_30._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                           <-Map 17 [SIMPLE_EDGE]
                                                             SHUFFLE [RS_30]
@@ -204,7 +204,7 @@ Stage-0
                                                           <-Reducer 10 [SIMPLE_EDGE]
                                                             SHUFFLE [RS_29]
                                                               PartitionCols:_col2
-                                                              Merge Join Operator [MERGEJOIN_133] (rows=316788826 width=135)
+                                                              Merge Join Operator [MERGEJOIN_132] (rows=316788826 width=135)
                                                                 Conds:RS_26._col0=RS_27._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                               <-Map 16 [SIMPLE_EDGE]
                                                                 SHUFFLE [RS_27]
@@ -227,7 +227,7 @@ Stage-0
                                   <-Reducer 2 [SIMPLE_EDGE]
                                     SHUFFLE [RS_52]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_132] (rows=41342400 width=15)
+                                      Merge Join Operator [MERGEJOIN_136] (rows=41342400 width=15)
                                         Conds:RS_49._col2=RS_50._col0(Inner),Output:["_col0","_col1","_col3","_col5"]
                                       <-Map 1 [SIMPLE_EDGE]
                                         SHUFFLE [RS_49]

http://git-wip-us.apache.org/repos/asf/hive/blob/a926179f/ql/src/test/results/clientpositive/perf/tez/query78.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query78.q.out b/ql/src/test/results/clientpositive/perf/tez/query78.q.out
index 9c2d7b3..9ec4739 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query78.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query78.q.out
@@ -176,7 +176,7 @@ Stage-0
                                     Output:["_col0","_col1","_col2","_col4","_col5","_col6"]
                                     Filter Operator [FIL_58] (rows=79197206 width=135)
                                       predicate:_col8 is null
-                                      Merge Join Operator [MERGEJOIN_110] (rows=158394413 width=135)
+                                      Merge Join Operator [MERGEJOIN_108] (rows=158394413 width=135)
                                         Conds:RS_55._col2, _col3=RS_56._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
                                       <-Map 17 [SIMPLE_EDGE]
                                         SHUFFLE [RS_55]
@@ -213,7 +213,7 @@ Stage-0
                                   PartitionCols:_col0, _col1
                                   Group By Operator [GBY_17] (rows=348477374 width=88)
                                     Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col4, _col3
-                                    Merge Join Operator [MERGEJOIN_107] (rows=348477374 width=88)
+                                    Merge Join Operator [MERGEJOIN_109] (rows=348477374 width=88)
                                       Conds:RS_13._col0=RS_14._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
                                     <-Map 1 [SIMPLE_EDGE]
                                       SHUFFLE [RS_13]
@@ -256,7 +256,7 @@ Stage-0
                                   PartitionCols:_col0, _col1
                                   Group By Operator [GBY_38] (rows=87121617 width=135)
                                     Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col4, _col3
-                                    Merge Join Operator [MERGEJOIN_109] (rows=87121617 width=135)
+                                    Merge Join Operator [MERGEJOIN_110] (rows=87121617 width=135)
                                       Conds:RS_34._col0=RS_35._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
                                     <-Map 1 [SIMPLE_EDGE]
                                       SHUFFLE [RS_34]
@@ -269,7 +269,7 @@ Stage-0
                                           Output:["_col0","_col1","_col2","_col4","_col5","_col6"]
                                           Filter Operator [FIL_32] (rows=79201469 width=135)
                                             predicate:_col8 is null
-                                            Merge Join Operator [MERGEJOIN_108] (rows=158402938 width=135)
+                                            Merge Join Operator [MERGEJOIN_107] (rows=158402938 width=135)
                                               Conds:RS_29._col1, _col3=RS_30._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
                                             <-Map 14 [SIMPLE_EDGE]
                                               SHUFFLE [RS_29]


[5/9] hive git commit: HIVE-18759: Remove unconnected q.out-s (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
deleted file mode 100644
index c8959ef..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
+++ /dev/null
@@ -1,943 +0,0 @@
-WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 183488 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: ((UDFToDouble(cbigint) < cdouble) and (cint > 0)) (type: boolean)
-                    Statistics: Num rows: 1365 Data size: 20400 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cbigint (type: bigint), cdouble (type: double)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE
-                      Limit
-                        Number of rows: 7
-                        Statistics: Num rows: 7 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 7 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 7
-      Processor Tree:
-        ListSink
-
-WARNING: Comparing a bigint and a double may result in a loss of precision.
-PREHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--1887561756	-10011.0
--1887561756	-13877.0
--1887561756	-2281.0
--1887561756	-8881.0
--1887561756	10361.0
--1887561756	1839.0
--1887561756	9531.0
-PREHOOK: query: explain vectorization detail
-select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 146796 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:tinyint)
-                    predicate: ctinyint is not null (type: boolean)
-                    Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: ctinyint (type: tinyint), cdouble (type: double), csmallint (type: smallint)
-                      outputColumnNames: _col0, _col1, _col2
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 5, 1]
-                      Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint), _col1 (type: double)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 5]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col2 (type: smallint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:tinyint, KEY.reducesinkkey1:double, VALUE._col0:smallint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: smallint)
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--64	-10462.0	-10462
--64	-15920.0	-15920
--64	-1600.0	-1600
--64	-200.0	-200
--64	-2919.0	-2919
--64	-3097.0	-3097
--64	-3586.0	-3586
--64	-4018.0	-4018
--64	-4040.0	-4040
--64	-4803.0	-4803
--64	-6907.0	-6907
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-7196.0	-7196
--64	-8080.0	-8080
--64	-9842.0	-9842
-PREHOOK: query: explain vectorization detail
-select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double)
-                    outputColumnNames: _col0, _col1
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 13]
-                        selectExpressions: DoubleColAddDoubleScalar(col 5:double, val 1.0) -> 13:double
-                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: avg(_col1)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFAvgDouble(col 13:double) -> struct<count:bigint,sum:double,input:double>
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
-                      keys: _col0 (type: tinyint)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 128 Data size: 10628 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            partitionColumnNums: [0]
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 128 Data size: 10628 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:tinyint, VALUE._col0:struct<count:bigint,sum:double,input:double>
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFAvgFinal(col 1:struct<count:bigint,sum:double,input:double>) -> double
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: [0]
-                keys: KEY._col0 (type: tinyint)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 128 Data size: 1412 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46	3033.55
--47	-574.6428571428571
--48	1672.909090909091
--49	768.7659574468086
--50	-960.0192307692307
--51	-96.46341463414635
--52	2810.705882352941
--53	-532.7567567567568
--54	2712.7272727272725
--55	2385.595744680851
--56	2595.818181818182
--57	1867.0535714285713
--58	3483.2444444444445
--59	318.27272727272725
--60	1071.82
--61	914.3404255319149
--62	245.69387755102042
--63	2178.7272727272725
--64	373.52941176470586
-NULL	9370.0945309795
-PREHOOK: query: explain vectorization detail
-select distinct(ctinyint) from alltypesorc limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select distinct(ctinyint) from alltypesorc limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint)
-                    outputColumnNames: ctinyint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0]
-                    Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: ctinyint (type: tinyint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
-                        Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                        TopN Hash Memory Usage: 0.3
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: KEY._col0:tinyint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: tinyint)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 128 Data size: 388 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46
--47
--48
--49
--50
--51
--52
--53
--54
--55
--56
--57
--58
--59
--60
--61
--62
--63
--64
-NULL
-PREHOOK: query: explain vectorization detail
-select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: ctinyint, cdouble
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 5]
-                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 0:tinyint, col 5:double
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: ctinyint (type: tinyint), cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: tinyint), _col1 (type: double)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: tinyint)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 1]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            partitionColumnNums: [0]
-                            valueColumnNums: []
-                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:tinyint, KEY._col1:double
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:tinyint, col 1:double
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col1)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 1:double) -> bigint
-                      className: VectorGroupByOperator
-                      groupByMode: COMPLETE
-                      keyExpressions: col 0:tinyint
-                      native: false
-                      vectorProcessingMode: STREAMING
-                      projectedOutputColumnNums: [0]
-                  keys: _col0 (type: tinyint)
-                  mode: complete
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 128 Data size: 1412 Basic stats: COMPLETE Column stats: COMPLETE
-                  Limit
-                    Number of rows: 20
-                    Limit Vectorization:
-                        className: VectorLimitOperator
-                        native: true
-                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--46	24
--47	22
--48	29
--49	26
--50	30
--51	21
--52	33
--53	22
--54	26
--55	29
--56	36
--57	35
--58	23
--59	31
--60	27
--61	25
--62	27
--63	19
--64	24
-NULL	2932
-PREHOOK: query: explain vectorization detail
-select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 0
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail
-select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:tinyint)
-                    predicate: ctinyint is not null (type: boolean)
-                    Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: sum(ctinyint)
-                      Group By Vectorization:
-                          aggregators: VectorUDAFSumLong(col 0:tinyint) -> bigint
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 5:double
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: [0]
-                      keys: cdouble (type: double)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: double)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: double)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkMultiKeyOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: [1]
-                        Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 5]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY._col0:double, VALUE._col0:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: sum(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:double
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: [0]
-                keys: KEY._col0 (type: double)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: bigint), _col0 (type: double)
-                  sort order: ++
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkObjectHashOperator
-                      keyColumnNums: [1, 0]
-                      native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      valueColumnNums: []
-                  Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                  TopN Hash Memory Usage: 0.3
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY.reducesinkkey0:bigint, KEY.reducesinkkey1:double
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey1 (type: double), KEY.reducesinkkey0 (type: bigint)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [1, 0]
-                Statistics: Num rows: 4159 Data size: 58120 Basic stats: COMPLETE Column stats: COMPLETE
-                Limit
-                  Number of rows: 20
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
-                  Statistics: Num rows: 20 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
-                    Statistics: Num rows: 20 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 20
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--10462.0	-64
--1121.0	-89
--11322.0	-101
--11492.0	-78
--15920.0	-64
--4803.0	-64
--6907.0	-64
--7196.0	-2009
--8080.0	-64
--8118.0	-80
--9842.0	-64
-10496.0	-67
-15601.0	-1733
-3520.0	-86
-4811.0	-115
-5241.0	-80
-557.0	-75
-7705.0	-88
-9452.0	-76
-NULL	-32768

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out b/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
deleted file mode 100644
index 2b5a21e..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorization_parquet_projection.q.out
+++ /dev/null
@@ -1,684 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS parquet_types_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_types_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_types_staging (
-  cint int,
-  ctinyint tinyint,
-  csmallint smallint,
-  cfloat float,
-  cdouble double,
-  cstring1 string,
-  t timestamp,
-  cchar char(5),
-  cvarchar varchar(10),
-  cbinary string,
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>,
-  d date
-) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-COLLECTION ITEMS TERMINATED BY ','
-MAP KEYS TERMINATED BY ':'
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: CREATE TABLE parquet_types_staging (
-  cint int,
-  ctinyint tinyint,
-  csmallint smallint,
-  cfloat float,
-  cdouble double,
-  cstring1 string,
-  t timestamp,
-  cchar char(5),
-  cvarchar varchar(10),
-  cbinary string,
-  m1 map<string, varchar(3)>,
-  l1 array<int>,
-  st1 struct<c1:int, c2:char(1)>,
-  d date
-) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|'
-COLLECTION ITEMS TERMINATED BY ','
-MAP KEYS TERMINATED BY ':'
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: DROP TABLE IF EXISTS parquet_project_test
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_project_test
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_project_test(
-cint int,
-m1 map<string, string>
-) STORED AS PARQUET
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: CREATE TABLE parquet_project_test(
-cint int,
-m1 map<string, string>
-) STORED AS PARQUET
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_project_test
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","red") from parquet_types_staging
-where ctinyint = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","red") from parquet_types_staging
-where ctinyint = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","green") from parquet_types_staging
-where ctinyint = 2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","green") from parquet_types_staging
-where ctinyint = 2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","blue") from parquet_types_staging
-where ctinyint = 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: insert into parquet_project_test
-select ctinyint, map("color","blue") from parquet_types_staging
-where ctinyint = 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_project_test
-POSTHOOK: Lineage: parquet_project_test.cint EXPRESSION []
-POSTHOOK: Lineage: parquet_project_test.m1 EXPRESSION []
-PREHOOK: query: explain vectorization select * from parquet_project_test
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select * from parquet_project_test
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: cint (type: int), m1 (type: map<string,string>)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 22 Data size: 20328 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Vectorizing complex type MAP not enabled (map<string,string>) since hive.vectorized.complex.types.enabled IS false
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
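The plan above falls back to row mode on the map side because the projected map<string,string> column is a complex type: the notVectorizedReason points at hive.vectorized.complex.types.enabled. A minimal sketch of re-running the same explain with that switch flipped (the resulting plan is not part of this output and is not verified here):

    -- hive.vectorized.complex.types.enabled is the setting named in the
    -- notVectorizedReason above; with it on, the Select over m1 should
    -- become eligible for vectorization.
    set hive.vectorized.complex.types.enabled=true;
    explain vectorization select * from parquet_project_test;
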
-PREHOOK: query: select * from parquet_project_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select * from parquet_project_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-1	{"color":"red"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-2	{"color":"green"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-3	{"color":"blue"}
-PREHOOK: query: explain vectorization select count(*) from parquet_project_test
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_project_test
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 22 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_project_test
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_project_test
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-22
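Note that count(*) vectorizes end to end even though the table carries a map column: the Select Operator projects no columns, so the complex type never reaches the vectorizer. A comparable probe that also avoids the map (a sketch, assuming the same session):

    -- projecting only the int column keeps the scan free of complex types,
    -- so the Map vertex should again report vectorized: true.
    explain vectorization select cint from parquet_project_test;
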
-PREHOOK: query: explain vectorization select cint, count(*) from parquet_project_test
-group by cint
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select cint, count(*) from parquet_project_test
-group by cint
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: cint (type: int)
-                    outputColumnNames: cint
-                    Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: cint (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 22 Data size: 88 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select cint, count(*) from parquet_project_test
-group by cint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select cint, count(*) from parquet_project_test
-group by cint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-1	8
-2	7
-3	7
-PREHOOK: query: explain vectorization select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_project_test
-                  Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: m1['color'] (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 22 Data size: 20240 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-            Execution mode: llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                notVectorizedReason: Select expression for SELECT operator: Vectorizing complex type MAP not enabled (map<string,string>) since hive.vectorized.complex.types.enabled IS false
-                vectorized: false
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 11 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 11 Data size: 10120 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-POSTHOOK: query: select m1["color"], count(*) from parquet_project_test
-group by m1["color"]
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_project_test
-#### A masked pattern was here ####
-blue	7
-green	7
-red	8
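This last plan is a hybrid: the map side runs in row mode because of the m1["color"] subscript over the map column, while Reducer 2 vectorizes independently under hive.vectorized.execution.reduce.enabled. A sketch of forcing the whole query to row mode for comparison (unverified output):

    -- the reduce-side switch named in enableConditionsMet above; turning
    -- it off should leave both vertices unvectorized.
    set hive.vectorized.execution.reduce.enabled=false;
    explain vectorization select m1["color"], count(*) from parquet_project_test
    group by m1["color"];
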
-PREHOOK: query: create table if not exists parquet_nullsplit(key string, val string) partitioned by (len string)
-stored as parquet
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_nullsplit
-POSTHOOK: query: create table if not exists parquet_nullsplit(key string, val string) partitioned by (len string)
-stored as parquet
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_nullsplit
-PREHOOK: query: insert into table parquet_nullsplit partition(len='1')
-values ('one', 'red')
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@parquet_nullsplit@len=1
-POSTHOOK: query: insert into table parquet_nullsplit partition(len='1')
-values ('one', 'red')
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@parquet_nullsplit@len=1
-POSTHOOK: Lineage: parquet_nullsplit PARTITION(len=1).key SCRIPT []
-POSTHOOK: Lineage: parquet_nullsplit PARTITION(len=1).val SCRIPT []
-PREHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '1'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '1'
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_nullsplit
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs (cache only)
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_nullsplit where len = '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_nullsplit
-PREHOOK: Input: default@parquet_nullsplit@len=1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_nullsplit where len = '1'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_nullsplit
-POSTHOOK: Input: default@parquet_nullsplit@len=1
-#### A masked pattern was here ####
-1
-PREHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '99'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization select count(*) from parquet_nullsplit where len = '99'
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: parquet_nullsplit
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (len = '99') (type: boolean)
-                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: unknown
-            Map Vectorization:
-                enabled: true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from parquet_nullsplit where len = '99'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_nullsplit
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from parquet_nullsplit where len = '99'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_nullsplit
-#### A masked pattern was here ####
-0
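For len = '99' the plan reports LLAP IO: unknown and lists no input file formats, presumably because partition pruning leaves no real Parquet split to read (the table name hints at this null-split path), yet the vertex still vectorizes. A quick way to confirm which partitions actually exist (a sketch):

    -- only len=1 was ever populated above, so this should list a single
    -- partition.
    show partitions parquet_nullsplit;
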
-PREHOOK: query: drop table parquet_nullsplit
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_nullsplit
-PREHOOK: Output: default@parquet_nullsplit
-POSTHOOK: query: drop table parquet_nullsplit
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_nullsplit
-POSTHOOK: Output: default@parquet_nullsplit
-PREHOOK: query: drop table parquet_project_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_project_test
-PREHOOK: Output: default@parquet_project_test
-POSTHOOK: query: drop table parquet_project_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_project_test
-POSTHOOK: Output: default@parquet_project_test
-PREHOOK: query: drop table parquet_types_staging
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: drop table parquet_types_staging
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_types_staging

http://git-wip-us.apache.org/repos/asf/hive/blob/99380fbd/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
deleted file mode 100644
index 673e607..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
+++ /dev/null
@@ -1,214 +0,0 @@
-PREHOOK: query: create temporary table x (a int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@x
-POSTHOOK: query: create temporary table x (a int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@x
-PREHOOK: query: create temporary table y (b int) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@y
-POSTHOOK: query: create temporary table y (b int) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@y
-PREHOOK: query: insert into x values(1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@x
-POSTHOOK: query: insert into x values(1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@x
-POSTHOOK: Lineage: x.a SCRIPT []
-PREHOOK: query: insert into y values(1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@y
-POSTHOOK: query: insert into y values(1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@y
-POSTHOOK: Lineage: y.b SCRIPT []
-PREHOOK: query: explain vectorization expression
-select count(1) from x, y where a = b
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
-select count(1) from x, y where a = b
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Map 1 <- Map 3 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: x
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:int)
-                    predicate: a is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: a (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0]
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      Map Join Operator
-                        condition map:
-                             Inner Join 0 to 1
-                        keys:
-                          0 _col0 (type: int)
-                          1 _col0 (type: int)
-                        Map Join Vectorization:
-                            className: VectorMapJoinInnerBigOnlyLongOperator
-                            native: true
-                            nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Fast Hash Table and No Hybrid Hash Join IS true
-                        input vertices:
-                          1 Map 3
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Group By Operator
-                          aggregations: count()
-                          Group By Vectorization:
-                              aggregators: VectorUDAFCountStar(*) -> bigint
-                              className: VectorGroupByOperator
-                              groupByMode: HASH
-                              native: false
-                              vectorProcessingMode: HASH
-                              projectedOutputColumnNums: [0]
-                          mode: hash
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          Reduce Output Operator
-                            sort order: 
-                            Reduce Sink Vectorization:
-                                className: VectorReduceSinkEmptyKeyOperator
-                                native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                            value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: y
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: SelectColumnIsNotNull(col 0:int)
-                    predicate: b is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: b (type: int)
-                      outputColumnNames: _col0
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0]
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                Group By Vectorization:
-                    aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0]
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
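The plan above hits the fully native path, VectorMapJoinInnerBigOnlyLongOperator, with hive.mapjoin.optimized.hashtable and hive.vectorized.execution.mapjoin.native.enabled both listed in nativeConditionsMet. A sketch of disabling the native map join to compare plans (both setting names are taken from the conditions above; the resulting plan is not verified here):

    -- with the native path off, the join is expected to fall back to a
    -- non-native vectorized map join implementation.
    set hive.vectorized.execution.mapjoin.native.enabled=false;
    explain vectorization expression
    select count(1) from x, y where a = b;
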
-PREHOOK: query: select count(1) from x, y where a = b
-PREHOOK: type: QUERY
-PREHOOK: Input: default@x
-PREHOOK: Input: default@y
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from x, y where a = b
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@x
-POSTHOOK: Input: default@y
-#### A masked pattern was here ####
-1