You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by xu...@apache.org on 2015/11/18 22:45:49 UTC
[01/23] hive git commit: HIVE-11180: Enable native vectorized map
join for spark [Spark Branch] (Rui reviewed by Xuefu)
Repository: hive
Updated Branches:
refs/heads/master 22499db20 -> fb944ee49
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
new file mode 100644
index 0000000..2c7cd5b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -0,0 +1,1406 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table sorted_mod_4 stored as orc
+as select ctinyint, pmod(cint, 4) as cmodint from alltypesorc
+where cint is not null and ctinyint is not null
+order by ctinyint
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sorted_mod_4
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table sorted_mod_4 stored as orc
+as select ctinyint, pmod(cint, 4) as cmodint from alltypesorc
+where cint is not null and ctinyint is not null
+order by ctinyint
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sorted_mod_4
+PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sorted_mod_4
+PREHOOK: Output: default@sorted_mod_4
+POSTHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sorted_mod_4
+POSTHOOK: Output: default@sorted_mod_4
+PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+PREHOOK: query: create table small_table stored
+as orc as select ctinyint, cbigint from alltypesorc limit 100
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_table
+POSTHOOK: query: create table small_table stored
+as orc as select ctinyint, cbigint from alltypesorc limit 100
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_table
+PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Output: default@small_table
+POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Output: default@small_table
+PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select count(*) from (select s.*, st.*
+from sorted_mod_4 s
+left outer join small_table st
+on s.ctinyint = st.ctinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.*, st.*
+from sorted_mod_4 s
+left outer join small_table st
+on s.ctinyint = st.ctinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: st
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.*, st.*
+from sorted_mod_4 s
+left outer join small_table st
+on s.ctinyint = st.ctinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.*, st.*
+from sorted_mod_4 s
+left outer join small_table st
+on s.ctinyint = st.ctinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+6876
+PREHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.cmodint = 2
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.cmodint = 2
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {(_col1 = 2)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {(_col1 = 2)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.cmodint = 2
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.cmodint = 2
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+6058
+PREHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+6248
+PREHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.ctinyint < 100
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.ctinyint < 100
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {(_col0 < 100)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {(_col0 < 100)}
+ 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.ctinyint < 100
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint
+from sorted_mod_4 s
+left outer join small_table sm
+on s.ctinyint = sm.ctinyint and s.ctinyint < 100
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+6876
+PREHOOK: query: explain
+select count(*) from (select s.*, sm.*, s2.*
+from sorted_mod_4 s
+left outer join small_table sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join sorted_mod_4 s2
+ on s2.ctinyint = s.ctinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.*, sm.*, s2.*
+from sorted_mod_4 s
+left outer join small_table sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join sorted_mod_4 s2
+ on s2.ctinyint = s.ctinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cbigint (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 UDFToLong(_col1) (type: bigint)
+ 1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 UDFToLong(_col1) (type: bigint)
+ 1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 7329 Data size: 2440 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.*, sm.*, s2.*
+from sorted_mod_4 s
+left outer join small_table sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join sorted_mod_4 s2
+ on s2.ctinyint = s.ctinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table
+PREHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.*, sm.*, s2.*
+from sorted_mod_4 s
+left outer join small_table sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join sorted_mod_4 s2
+ on s2.ctinyint = s.ctinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table
+POSTHOOK: Input: default@sorted_mod_4
+#### A masked pattern was here ####
+3268334
+PREHOOK: query: create table mod_8_mod_4 stored as orc
+as select pmod(ctinyint, 8) as cmodtinyint, pmod(cint, 4) as cmodint from alltypesorc
+where cint is not null and ctinyint is not null
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mod_8_mod_4
+POSTHOOK: query: create table mod_8_mod_4 stored as orc
+as select pmod(ctinyint, 8) as cmodtinyint, pmod(cint, 4) as cmodint from alltypesorc
+where cint is not null and ctinyint is not null
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mod_8_mod_4
+PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Output: default@mod_8_mod_4
+POSTHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Output: default@mod_8_mod_4
+PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+#### A masked pattern was here ####
+PREHOOK: query: create table small_table2 stored
+as orc as select pmod(ctinyint, 16) as cmodtinyint, cbigint from alltypesorc limit 100
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_table2
+POSTHOOK: query: create table small_table2 stored
+as orc as select pmod(ctinyint, 16) as cmodtinyint, cbigint from alltypesorc limit 100
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_table2
+PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table2
+PREHOOK: Output: default@small_table2
+POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table2
+POSTHOOK: Output: default@small_table2
+PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select count(*) from (select s.*, st.*
+from mod_8_mod_4 s
+left outer join small_table2 st
+on s.cmodtinyint = st.cmodtinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.*, st.*
+from mod_8_mod_4 s
+left outer join small_table2 st
+on s.cmodtinyint = st.cmodtinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: st
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.*, st.*
+from mod_8_mod_4 s
+left outer join small_table2 st
+on s.cmodtinyint = st.cmodtinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.*, st.*
+from mod_8_mod_4 s
+left outer join small_table2 st
+on s.cmodtinyint = st.cmodtinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+39112
+PREHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {(_col1 = 2)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {(_col1 = 2)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+11171
+PREHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {((_col0 pmod 4) = _col1)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {((_col0 pmod 4) = _col1)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+14371
+PREHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ filter predicates:
+ 0 {(_col0 < 3)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ filter predicates:
+ 0 {(_col0 < 3)}
+ 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint
+from mod_8_mod_4 s
+left outer join small_table2 sm
+on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+17792
+PREHOOK: query: explain
+select count(*) from (select s.*, sm.*, s2.*
+from mod_8_mod_4 s
+left outer join small_table2 sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join mod_8_mod_4 s2
+ on s2.cmodtinyint = s.cmodtinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s.*, sm.*, s2.*
+from mod_8_mod_4 s
+left outer join small_table2 sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join mod_8_mod_4 s2
+ on s2.cmodtinyint = s.cmodtinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: sm
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cbigint (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 UDFToLong(_col1) (type: bigint)
+ 1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cmodtinyint (type: int), cmodint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 UDFToLong(_col1) (type: bigint)
+ 1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 7329 Data size: 3369 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (select s.*, sm.*, s2.*
+from mod_8_mod_4 s
+left outer join small_table2 sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join mod_8_mod_4 s2
+ on s2.cmodtinyint = s.cmodtinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select s.*, sm.*, s2.*
+from mod_8_mod_4 s
+left outer join small_table2 sm
+ on pmod(sm.cbigint, 8) = s.cmodint
+left outer join mod_8_mod_4 s2
+ on s2.cmodtinyint = s.cmodtinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Input: default@small_table2
+#### A masked pattern was here ####
+6524438
[11/23] hive git commit: HIVE-12091: Merge file doesn't work for ORC
table when running on Spark. [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
HIVE-12091: Merge file doesn't work for ORC table when running on Spark. [Spark Branch] (Rui reviewed by Xuefu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/13eb4095
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/13eb4095
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/13eb4095
Branch: refs/heads/master
Commit: 13eb40954935ae7b84a71603efed2b98857d46a7
Parents: 70eeadd
Author: Rui Li <ru...@intel.com>
Authored: Wed Oct 14 10:25:16 2015 +0800
Committer: Rui Li <ru...@intel.com>
Committed: Wed Oct 14 10:26:27 2015 +0800
----------------------------------------------------------------------
.../test/resources/testconfiguration.properties | 11 +
.../hive/ql/exec/spark/SparkPlanGenerator.java | 16 +
ql/src/test/queries/clientpositive/orc_merge1.q | 2 +
ql/src/test/queries/clientpositive/orc_merge2.q | 1 +
ql/src/test/queries/clientpositive/orc_merge3.q | 1 +
ql/src/test/queries/clientpositive/orc_merge4.q | 2 +
ql/src/test/queries/clientpositive/orc_merge5.q | 3 +
ql/src/test/queries/clientpositive/orc_merge6.q | 3 +
ql/src/test/queries/clientpositive/orc_merge7.q | 3 +
ql/src/test/queries/clientpositive/orc_merge8.q | 2 +
ql/src/test/queries/clientpositive/orc_merge9.q | 1 +
.../clientpositive/orc_merge_incompat1.q | 1 +
.../clientpositive/orc_merge_incompat2.q | 1 +
.../clientpositive/spark/orc_merge1.q.out | 485 +++++++++++++++
.../clientpositive/spark/orc_merge2.q.out | 268 ++++++++
.../clientpositive/spark/orc_merge3.q.out | 207 +++++++
.../clientpositive/spark/orc_merge4.q.out | 231 +++++++
.../clientpositive/spark/orc_merge5.q.out | 334 ++++++++++
.../clientpositive/spark/orc_merge6.q.out | 508 +++++++++++++++
.../clientpositive/spark/orc_merge7.q.out | 619 +++++++++++++++++++
.../clientpositive/spark/orc_merge8.q.out | 130 ++++
.../clientpositive/spark/orc_merge9.q.out | 186 ++++++
.../spark/orc_merge_incompat1.q.out | 240 +++++++
.../spark/orc_merge_incompat2.q.out | 370 +++++++++++
24 files changed, 3625 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 9b5fea4..72dbcec 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1167,6 +1167,17 @@ miniSparkOnYarn.query.files=auto_sortmerge_join_16.q,\
load_fs2.q,\
load_hdfs_file_with_space_in_the_name.q,\
optrstat_groupby.q,\
+ orc_merge1.q,\
+ orc_merge2.q,\
+ orc_merge3.q,\
+ orc_merge4.q,\
+ orc_merge5.q,\
+ orc_merge6.q,\
+ orc_merge7.q,\
+ orc_merge8.q,\
+ orc_merge9.q,\
+ orc_merge_incompat1.q,\
+ orc_merge_incompat2.q,\
parallel_orderby.q,\
ql_rewrite_gbtoidx.q,\
ql_rewrite_gbtoidx_cbo_1.q,\
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 762ce7d..d2c5245 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.exec.spark;
+import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -27,6 +28,7 @@ import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper;
@@ -220,6 +222,20 @@ public class SparkPlanGenerator {
byte[] confBytes = KryoSerializer.serializeJobConf(newJobConf);
boolean caching = isCachingWork(work, sparkWork);
if (work instanceof MapWork) {
+ // Create tmp dir for MergeFileWork
+ if (work instanceof MergeFileWork) {
+ Path outputPath = ((MergeFileWork) work).getOutputDir();
+ Path tempOutPath = Utilities.toTempPath(outputPath);
+ FileSystem fs = outputPath.getFileSystem(jobConf);
+ try {
+ if (!fs.exists(tempOutPath)) {
+ fs.mkdirs(tempOutPath);
+ }
+ } catch (IOException e) {
+ throw new RuntimeException(
+ "Can't make path " + outputPath + " : " + e.getMessage());
+ }
+ }
MapTran mapTran = new MapTran(caching);
HiveMapFunction mapFunc = new HiveMapFunction(confBytes, sparkReporter);
mapTran.setMapFunction(mapFunc);
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q
index a8ac85b..afef1e5 100644
--- a/ql/src/test/queries/clientpositive/orc_merge1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge1.q
@@ -10,6 +10,7 @@ set tez.grouping.max-size=2000;
set hive.merge.tezfiles=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
-- SORT_QUERY_RESULTS
@@ -39,6 +40,7 @@ dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
-- auto-merge slow way
EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge2.q b/ql/src/test/queries/clientpositive/orc_merge2.q
index 44ef280..6d229f1 100644
--- a/ql/src/test/queries/clientpositive/orc_merge2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge2.q
@@ -2,6 +2,7 @@ set hive.explain.user=false;
set hive.merge.orcfile.stripe.level=true;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.merge.sparkfiles=true;
DROP TABLE orcfile_merge2a;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge3.q b/ql/src/test/queries/clientpositive/orc_merge3.q
index 9722e6d..f5600c6 100644
--- a/ql/src/test/queries/clientpositive/orc_merge3.q
+++ b/ql/src/test/queries/clientpositive/orc_merge3.q
@@ -1,5 +1,6 @@
set hive.explain.user=false;
set hive.merge.orcfile.stripe.level=true;
+set hive.merge.sparkfiles=true;
DROP TABLE orcfile_merge3a;
DROP TABLE orcfile_merge3b;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge4.q b/ql/src/test/queries/clientpositive/orc_merge4.q
index 3b50465..536e717 100644
--- a/ql/src/test/queries/clientpositive/orc_merge4.q
+++ b/ql/src/test/queries/clientpositive/orc_merge4.q
@@ -9,12 +9,14 @@ CREATE TABLE orcfile_merge3a (key int, value string)
CREATE TABLE orcfile_merge3b (key int, value string) STORED AS TEXTFILE;
set hive.merge.mapfiles=false;
+set hive.merge.sparkfiles=false;
INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
SELECT * FROM src;
dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=1/;
set hive.merge.mapfiles=true;
+set hive.merge.sparkfiles=true;
INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
SELECT * FROM src;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge5.q b/ql/src/test/queries/clientpositive/orc_merge5.q
index 3d32875..c24c407 100644
--- a/ql/src/test/queries/clientpositive/orc_merge5.q
+++ b/ql/src/test/queries/clientpositive/orc_merge5.q
@@ -17,6 +17,7 @@ set hive.merge.mapredfiles=false;
set hive.compute.splits.in.am=true;
set tez.grouping.min-size=1000;
set tez.grouping.max-size=50000;
+set hive.merge.sparkfiles=false;
-- 3 mappers
explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
@@ -31,6 +32,7 @@ set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
-- 3 mappers
explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
@@ -45,6 +47,7 @@ set hive.merge.orcfile.stripe.level=false;
set hive.merge.tezfiles=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
analyze table orc_merge5b compute statistics noscan;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge6.q b/ql/src/test/queries/clientpositive/orc_merge6.q
index 6bdaa9e..1612a8b 100644
--- a/ql/src/test/queries/clientpositive/orc_merge6.q
+++ b/ql/src/test/queries/clientpositive/orc_merge6.q
@@ -18,6 +18,7 @@ set hive.merge.mapredfiles=false;
set hive.compute.splits.in.am=true;
set tez.grouping.min-size=1000;
set tez.grouping.max-size=50000;
+set hive.merge.sparkfiles=false;
-- 3 mappers
explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
@@ -36,6 +37,7 @@ set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
-- 3 mappers
explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
@@ -54,6 +56,7 @@ set hive.merge.orcfile.stripe.level=false;
set hive.merge.tezfiles=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge7.q b/ql/src/test/queries/clientpositive/orc_merge7.q
index 7a351c6..49b81bf 100644
--- a/ql/src/test/queries/clientpositive/orc_merge7.q
+++ b/ql/src/test/queries/clientpositive/orc_merge7.q
@@ -22,6 +22,7 @@ set tez.grouping.max-size=50000;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.optimize.sort.dynamic.partition=false;
+set hive.merge.sparkfiles=false;
-- 3 mappers
explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
@@ -40,6 +41,7 @@ set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
-- 3 mappers
explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
@@ -58,6 +60,7 @@ set hive.merge.orcfile.stripe.level=false;
set hive.merge.tezfiles=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge8.q b/ql/src/test/queries/clientpositive/orc_merge8.q
index 61ea4bf..30a892b 100644
--- a/ql/src/test/queries/clientpositive/orc_merge8.q
+++ b/ql/src/test/queries/clientpositive/orc_merge8.q
@@ -30,6 +30,7 @@ set hive.merge.orcfile.stripe.level=false;
set hive.merge.tezfiles=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
insert overwrite table alltypes_orc select * from alltypes;
insert into table alltypes_orc select * from alltypes;
@@ -40,6 +41,7 @@ set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
alter table alltypes_orc concatenate;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge9.q b/ql/src/test/queries/clientpositive/orc_merge9.q
index 010b5a1..5f387ba 100644
--- a/ql/src/test/queries/clientpositive/orc_merge9.q
+++ b/ql/src/test/queries/clientpositive/orc_merge9.q
@@ -15,6 +15,7 @@ set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
+set hive.merge.sparkfiles=true;
select count(*) from ts_merge;
alter table ts_merge concatenate;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
index dd58524..b9f6246 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
@@ -10,6 +10,7 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
set hive.merge.orcfile.stripe.level=false;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
+set hive.merge.sparkfiles=false;
-- 3 mappers
explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
index a8f8842..11d16c2 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
@@ -22,6 +22,7 @@ set tez.am.grouping.max-size=50000;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.optimize.sort.dynamic.partition=false;
+set hive.merge.sparkfiles=false;
explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
set hive.exec.orc.default.row.index.stride=1000;
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
new file mode 100644
index 0000000..86df0a7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
@@ -0,0 +1,485 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1c
+PREHOOK: query: -- merge disabled
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- merge disabled
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge slow way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge slow way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1b@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge fast way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge fast way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1c
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1c
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1c@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1b WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1b WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1c WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1c WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Output: default@orcfile_merge1c
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
new file mode 100644
index 0000000..b7f1a65
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
@@ -0,0 +1,268 @@
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge2a
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ one 1
+ three
+ two
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge2a@one=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=7
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Output: default@orcfile_merge2a
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge3.q.out b/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
new file mode 100644
index 0000000..81a6013
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
@@ -0,0 +1,207 @@
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3b
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orcfile_merge3a
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge3b
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge3b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+POSTHOOK: Output: default@orcfile_merge3b
+POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3b
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3b
+POSTHOOK: Output: default@orcfile_merge3b
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge4.q.out b/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
new file mode 100644
index 0000000..8d433b0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
@@ -0,0 +1,231 @@
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3b
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orcfile_merge3a
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+POSTHOOK: Output: default@orcfile_merge3b
+POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3b
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3b
+POSTHOOK: Output: default@orcfile_merge3b
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
new file mode 100644
index 0000000..83721f5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
@@ -0,0 +1,334 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: explain alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: query: explain alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
[21/23] hive git commit: HIVE-12433: Merge master into spark
11/17/2015
Posted by xu...@apache.org.
HIVE-12433: Merge master into spark 11/17/2015
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/50b62ca6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/50b62ca6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/50b62ca6
Branch: refs/heads/master
Commit: 50b62ca6b3b52c56c030d24ee93e457765221c2a
Parents: c771306 2ff433f
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Tue Nov 17 08:01:45 2015 -0800
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Tue Nov 17 08:01:45 2015 -0800
----------------------------------------------------------------------
accumulo-handler/pom.xml | 12 +-
.../apache/hive/beeline/cli/TestHiveCli.java | 7 +-
beeline/src/test/resources/hive-site.xml | 11 +
.../hadoop/hive/cli/OptionsProcessor.java | 22 +-
common/pom.xml | 24 +-
.../apache/hadoop/hive/common/JavaUtils.java | 11 +-
.../org/apache/hadoop/hive/conf/HiveConf.java | 10 +-
data/conf/hive-site.xml | 10 +
data/conf/llap/hive-site.xml | 10 +
data/conf/spark/standalone/hive-site.xml | 10 +
data/conf/spark/yarn-client/hive-site.xml | 11 +
data/conf/tez/hive-site.xml | 10 +
data/files/parquet_type_promotion.txt | 4 +
hbase-handler/pom.xml | 44 +-
.../test/queries/positive/hbase_binary_binary.q | 12 +
.../results/positive/hbase_binary_binary.q.out | 54 +
hcatalog/core/pom.xml | 64 +-
hcatalog/hcatalog-pig-adapter/pom.xml | 24 +-
hcatalog/streaming/pom.xml | 20 +
hcatalog/webhcat/svr/pom.xml | 12 +-
hwi/pom.xml | 12 +-
itests/hcatalog-unit/pom.xml | 48 +-
itests/hive-minikdc/pom.xml | 48 +-
itests/hive-unit-hadoop2/pom.xml | 42 +-
itests/hive-unit/pom.xml | 96 +-
.../metastore/hbase/TestHBaseSchemaTool.java | 6 +-
.../metastore/hbase/TestHBaseSchemaTool2.java | 4 +-
.../cli/session/TestHiveSessionImpl.java | 1 +
itests/qtest-accumulo/pom.xml | 84 +-
itests/qtest-spark/pom.xml | 50 +
itests/qtest/pom.xml | 72 +-
itests/util/pom.xml | 24 +-
llap-client/pom.xml | 24 +-
llap-server/pom.xml | 58 +-
metastore/pom.xml | 36 +-
.../upgrade/mysql/022-HIVE-11970.mysql.sql | 12 +-
.../upgrade/mysql/hive-schema-1.3.0.mysql.sql | 2 +-
.../upgrade/mysql/hive-schema-2.0.0.mysql.sql | 2 +-
.../hive/metastore/hbase/HBaseSchemaTool.java | 4 -
pom.xml | 46 +-
ql/pom.xml | 76 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 10 +-
.../apache/hadoop/hive/ql/exec/MoveTask.java | 63 +-
.../apache/hadoop/hive/ql/exec/Utilities.java | 28 +
.../hive/ql/exec/mr/HadoopJobExecHelper.java | 11 +-
.../hive/ql/exec/tez/HiveSplitGenerator.java | 28 +-
.../hadoop/hive/ql/io/HiveFileFormatUtils.java | 166 +-
.../ql/io/SequenceFileInputFormatChecker.java | 3 +-
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 4 +
.../convert/DataWritableRecordConverter.java | 6 +-
.../ql/io/parquet/convert/ETypeConverter.java | 78 +-
.../convert/HiveCollectionConverter.java | 38 +-
.../io/parquet/convert/HiveGroupConverter.java | 21 +-
.../io/parquet/convert/HiveStructConverter.java | 86 +-
.../hive/ql/io/parquet/convert/Repeated.java | 9 +-
.../parquet/read/DataWritableReadSupport.java | 11 +-
.../BucketingSortingReduceSinkOptimizer.java | 3 +
.../optimizer/FixedBucketPruningOptimizer.java | 319 +++
.../hadoop/hive/ql/optimizer/Optimizer.java | 7 +
.../hive/ql/optimizer/SkewJoinOptimizer.java | 27 +-
.../ql/optimizer/calcite/HiveCalciteUtil.java | 20 +-
.../rules/HiveAggregateJoinTransposeRule.java | 8 +
.../translator/PlanModifierForASTConv.java | 10 +-
.../hadoop/hive/ql/parse/GenTezUtils.java | 4 +
.../hive/ql/parse/LoadSemanticAnalyzer.java | 49 +-
.../org/apache/hadoop/hive/ql/plan/MapWork.java | 14 +
.../hadoop/hive/ql/plan/TableScanDesc.java | 39 +
.../udf/generic/GenericUDFFromUtcTimestamp.java | 11 +-
.../queries/clientpositive/bucketpruning1.q | 97 +
.../clientpositive/insertoverwrite_bucket.q | 12 +
.../queries/clientpositive/parquet_columnar.q | 4 +
.../clientpositive/parquet_type_promotion.q | 72 +
.../test/queries/clientpositive/skewjoinopt21.q | 30 +
.../special_character_in_tabnames_1.q | 2 +-
.../special_character_in_tabnames_2.q | 2 +
.../clientpositive/udf_from_utc_timestamp.q | 5 +
.../clientnegative/archive_corrupt.q.out | 14 +-
.../clientnegative/load_orc_negative1.q.out | 2 +-
.../clientnegative/load_orc_negative2.q.out | 2 +-
.../clientnegative/load_orc_negative3.q.out | 2 +-
.../clientnegative/load_orc_negative_part.q.out | 2 +-
.../clientnegative/load_wrong_fileformat.q.out | 7 +-
.../load_wrong_fileformat_rc_seq.q.out | 7 +-
.../load_wrong_fileformat_txt_seq.q.out | 7 +-
.../results/clientpositive/bucketpruning1.q.out | 2282 +++++++++++++++++
.../clientpositive/insertoverwrite_bucket.q.out | 87 +
.../clientpositive/parquet_columnar.q.out | 37 +
.../clientpositive/parquet_type_promotion.q.out | 232 ++
.../results/clientpositive/skewjoinopt21.q.out | 230 ++
.../special_character_in_tabnames_2.q.out | 10 +-
.../clientpositive/tez/bucketpruning1.q.out | 2360 ++++++++++++++++++
.../clientpositive/udf_from_utc_timestamp.q.out | 13 +
serde/pom.xml | 24 +-
.../hadoop/hive/serde2/lazy/LazyBinary.java | 2 +-
.../hadoop/hive/serde2/lazy/LazyFactory.java | 3 +
.../hadoop/hive/serde2/lazy/LazyUtils.java | 6 +
.../hive/serde2/lazydio/LazyDioBinary.java | 51 +
service/pom.xml | 12 +-
.../hive/service/cli/operation/Operation.java | 23 +
.../service/cli/operation/SQLOperation.java | 2 +
.../service/cli/session/HiveSessionBase.java | 3 -
.../service/cli/session/HiveSessionImpl.java | 14 +
shims/0.23/pom.xml | 48 +-
shims/common/pom.xml | 11 +-
spark-client/pom.xml | 12 +-
storage-api/pom.xml | 12 +-
106 files changed, 7542 insertions(+), 401 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/50b62ca6/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/50b62ca6/pom.xml
----------------------------------------------------------------------
[08/23] hive git commit: HIVE-11844: Merge master to Spark branch
9/16/2015 [Spark Branch]
Posted by xu...@apache.org.
HIVE-11844: Merge master to Spark branch 9/16/2015 [Spark Branch]
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70eeadd2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70eeadd2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70eeadd2
Branch: refs/heads/master
Commit: 70eeadd2f019dcb2e301690290c8807731eab7a1
Parents: f78f663 1cce5f0
Author: xzhang <xz...@xzdt>
Authored: Wed Sep 16 12:16:30 2015 -0700
Committer: xzhang <xz...@xzdt>
Committed: Wed Sep 16 12:16:30 2015 -0700
----------------------------------------------------------------------
accumulo-handler/pom.xml | 4 -
.../apache/hadoop/hive/ant/GenVectorCode.java | 105 +
.../java/org/apache/hive/beeline/BeeLine.java | 13 +-
.../org/apache/hive/beeline/BeeLineOpts.java | 12 +-
.../org/apache/hive/beeline/HiveSchemaTool.java | 14 +-
.../src/main/resources/beeline-log4j.properties | 24 -
beeline/src/main/resources/beeline-log4j2.xml | 39 +
bin/ext/beeline.sh | 2 +-
bin/hive | 3 +
common/pom.xml | 22 +-
.../apache/hadoop/hive/common/JavaUtils.java | 11 +-
.../org/apache/hadoop/hive/common/LogUtils.java | 18 +-
.../hadoop/hive/common/jsonexplain/tez/Op.java | 8 +-
.../hive/common/jsonexplain/tez/Stage.java | 14 +-
.../common/jsonexplain/tez/TezJsonParser.java | 17 +-
.../org/apache/hadoop/hive/conf/HiveConf.java | 120 +-
common/src/main/resources/hive-log4j.properties | 88 -
common/src/main/resources/hive-log4j2.xml | 110 +
.../hadoop/hive/conf/TestHiveLogging.java | 8 +-
.../resources/hive-exec-log4j-test.properties | 59 -
.../test/resources/hive-exec-log4j2-test.xml | 85 +
.../test/resources/hive-log4j-test.properties | 71 -
common/src/test/resources/hive-log4j2-test.xml | 94 +
data/conf/hive-log4j-old.properties | 82 -
data/conf/hive-log4j.properties | 97 -
data/conf/hive-log4j2.xml | 143 +
data/conf/hive-site.xml | 6 -
data/conf/spark/log4j.properties | 24 -
data/conf/spark/log4j2.xml | 74 +
data/conf/tez/hive-site.xml | 9 +
data/files/dynpartdata1.txt | 5 +
data/files/dynpartdata2.txt | 6 +
docs/xdocs/language_manual/cli.xml | 2 +-
errata.txt | 10 +
.../hadoop/hive/hbase/ColumnMappings.java | 5 +
.../apache/hadoop/hive/hbase/HBaseSerDe.java | 19 +-
.../hadoop/hive/hbase/HBaseSerDeParameters.java | 8 +-
.../hadoop/hive/hbase/HBaseStorageHandler.java | 13 +-
.../hive/hbase/HiveHBaseInputFormatUtil.java | 50 +-
.../hadoop/hive/hbase/LazyHBaseCellMap.java | 19 +-
.../apache/hadoop/hive/hbase/LazyHBaseRow.java | 5 +-
.../hadoop/hive/hbase/LazyHBaseCellMapTest.java | 72 +
.../positive/hbase_binary_map_queries_prefix.q | 15 +-
.../queries/positive/hbase_null_first_col.q | 22 +
.../hbase_binary_map_queries_prefix.q.out | 40 +
.../results/positive/hbase_null_first_col.q.out | 109 +
.../test/results/positive/hbase_timestamp.q.out | 8 +-
hcatalog/bin/hcat_server.sh | 2 +-
hcatalog/bin/templeton.cmd | 4 +-
.../mapreduce/DefaultOutputFormatContainer.java | 7 +-
...namicPartitionFileRecordWriterContainer.java | 3 +-
.../mapreduce/FileOutputFormatContainer.java | 3 +-
hcatalog/scripts/hcat_server_start.sh | 2 +-
.../content/xdocs/configuration.xml | 2 +-
.../src/documentation/content/xdocs/install.xml | 2 +-
.../deployers/config/hive/hive-log4j.properties | 88 -
.../deployers/config/hive/hive-log4j2.xml | 110 +
.../deployers/config/hive/hive-site.mysql.xml | 22 +
.../templeton/deployers/start_hive_services.sh | 2 +-
.../hive/hcatalog/streaming/TestStreaming.java | 54 +-
.../webhcat/svr/src/main/bin/webhcat_server.sh | 4 +-
.../src/main/config/webhcat-log4j.properties | 45 -
.../svr/src/main/config/webhcat-log4j2.xml | 74 +
.../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 201 +-
.../java/org/apache/hive/hplsql/Column.java | 65 +
.../java/org/apache/hive/hplsql/Converter.java | 41 +-
.../main/java/org/apache/hive/hplsql/Exec.java | 184 +-
.../java/org/apache/hive/hplsql/Expression.java | 51 +-
.../main/java/org/apache/hive/hplsql/Meta.java | 216 +
.../main/java/org/apache/hive/hplsql/Row.java | 97 +
.../java/org/apache/hive/hplsql/Select.java | 63 +-
.../java/org/apache/hive/hplsql/Signal.java | 2 +-
.../main/java/org/apache/hive/hplsql/Stmt.java | 110 +-
.../main/java/org/apache/hive/hplsql/Var.java | 60 +-
.../apache/hive/hplsql/functions/Function.java | 62 +-
.../hive/hplsql/functions/FunctionDatetime.java | 14 +-
.../hive/hplsql/functions/FunctionMisc.java | 22 +-
.../hive/hplsql/functions/FunctionOra.java | 31 +-
.../hive/hplsql/functions/FunctionString.java | 46 +-
.../org/apache/hive/hplsql/TestHplsqlLocal.java | 12 +-
.../apache/hive/hplsql/TestHplsqlOffline.java | 76 +
.../test/queries/db/create_procedure_mssql.sql | 52 +
hplsql/src/test/queries/db/map_object.sql | 9 +
.../src/test/queries/db/rowtype_attribute.sql | 22 +
hplsql/src/test/queries/db/type_attribute.sql | 8 +
.../local/create_procedure_no_params.sql | 19 +
.../queries/local/exception_divide_by_zero.sql | 11 +
.../test/queries/offline/create_table_mssql.sql | 43 +
.../test/queries/offline/create_table_ora.sql | 53 +
.../results/db/create_procedure_mssql.out.txt | 45 +
hplsql/src/test/results/db/map_object.out.txt | 17 +
.../test/results/db/rowtype_attribute.out.txt | 42 +
.../src/test/results/db/type_attribute.out.txt | 15 +
.../test/results/local/create_function.out.txt | 4 +-
.../local/create_procedure_no_params.out.txt | 26 +
hplsql/src/test/results/local/declare.out.txt | 4 +-
.../local/exception_divide_by_zero.out.txt | 8 +
.../results/offline/create_table_mssql.out.txt | 24 +
.../results/offline/create_table_ora.out.txt | 42 +
.../benchmark/serde/LazySimpleSerDeBench.java | 453 +
.../vectorization/VectorizationBench.java | 93 +
.../hive/metastore/TestHiveMetaStore.java | 96 +-
.../hive/ql/security/FolderPermissionBase.java | 17 +-
.../TestOperationLoggingAPIWithMr.java | 2 -
.../TestOperationLoggingAPIWithTez.java | 2 -
.../operation/TestOperationLoggingLayout.java | 134 +
itests/pom.xml | 2 +-
itests/qtest/pom.xml | 26 +-
.../test/resources/testconfiguration.properties | 2 +
.../org/apache/hadoop/hive/ql/QTestUtil.java | 62 +-
jdbc/pom.xml | 1 +
.../org/apache/hive/jdbc/HiveConnection.java | 30 +-
.../org/apache/hive/jdbc/HiveStatement.java | 2 +-
jdbc/src/java/org/apache/hive/jdbc/Utils.java | 117 +-
.../hive/jdbc/ZooKeeperHiveClientHelper.java | 104 +-
metastore/if/hive_metastore.thrift | 5 +
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2046 ++-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 135 +
.../ThriftHiveMetastore_server.skeleton.cpp | 5 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 304 +-
.../gen/thrift/gen-cpp/hive_metastore_types.h | 47 +
.../hive/metastore/api/AbortTxnRequest.java | 2 +-
.../metastore/api/AddDynamicPartitions.java | 2 +-
.../metastore/api/AddPartitionsRequest.java | 2 +-
.../hive/metastore/api/AddPartitionsResult.java | 2 +-
.../hadoop/hive/metastore/api/AggrStats.java | 2 +-
.../metastore/api/AlreadyExistsException.java | 2 +-
.../metastore/api/BinaryColumnStatsData.java | 2 +-
.../metastore/api/BooleanColumnStatsData.java | 2 +-
.../hive/metastore/api/CheckLockRequest.java | 2 +-
.../hive/metastore/api/ColumnStatistics.java | 2 +-
.../metastore/api/ColumnStatisticsDesc.java | 2 +-
.../hive/metastore/api/ColumnStatisticsObj.java | 2 +-
.../hive/metastore/api/CommitTxnRequest.java | 2 +-
.../hive/metastore/api/CompactionRequest.java | 2 +-
.../api/ConfigValSecurityException.java | 2 +-
.../api/CurrentNotificationEventId.java | 2 +-
.../hadoop/hive/metastore/api/Database.java | 2 +-
.../apache/hadoop/hive/metastore/api/Date.java | 2 +-
.../hive/metastore/api/DateColumnStatsData.java | 2 +-
.../hadoop/hive/metastore/api/Decimal.java | 2 +-
.../metastore/api/DecimalColumnStatsData.java | 2 +-
.../metastore/api/DoubleColumnStatsData.java | 2 +-
.../hive/metastore/api/DropPartitionsExpr.java | 2 +-
.../metastore/api/DropPartitionsRequest.java | 2 +-
.../metastore/api/DropPartitionsResult.java | 2 +-
.../hive/metastore/api/EnvironmentContext.java | 2 +-
.../hadoop/hive/metastore/api/FieldSchema.java | 2 +-
.../hive/metastore/api/FireEventRequest.java | 2 +-
.../hive/metastore/api/FireEventResponse.java | 2 +-
.../hadoop/hive/metastore/api/Function.java | 2 +-
.../metastore/api/GetAllFunctionsResponse.java | 447 +
.../metastore/api/GetOpenTxnsInfoResponse.java | 2 +-
.../hive/metastore/api/GetOpenTxnsResponse.java | 2 +-
.../api/GetPrincipalsInRoleRequest.java | 2 +-
.../api/GetPrincipalsInRoleResponse.java | 2 +-
.../api/GetRoleGrantsForPrincipalRequest.java | 2 +-
.../api/GetRoleGrantsForPrincipalResponse.java | 2 +-
.../api/GrantRevokePrivilegeRequest.java | 2 +-
.../api/GrantRevokePrivilegeResponse.java | 2 +-
.../metastore/api/GrantRevokeRoleRequest.java | 2 +-
.../metastore/api/GrantRevokeRoleResponse.java | 2 +-
.../hive/metastore/api/HeartbeatRequest.java | 2 +-
.../metastore/api/HeartbeatTxnRangeRequest.java | 2 +-
.../api/HeartbeatTxnRangeResponse.java | 2 +-
.../hive/metastore/api/HiveObjectPrivilege.java | 2 +-
.../hive/metastore/api/HiveObjectRef.java | 2 +-
.../apache/hadoop/hive/metastore/api/Index.java | 2 +-
.../api/IndexAlreadyExistsException.java | 2 +-
.../metastore/api/InsertEventRequestData.java | 2 +-
.../metastore/api/InvalidInputException.java | 2 +-
.../metastore/api/InvalidObjectException.java | 2 +-
.../api/InvalidOperationException.java | 2 +-
.../api/InvalidPartitionException.java | 2 +-
.../hive/metastore/api/LockComponent.java | 2 +-
.../hadoop/hive/metastore/api/LockRequest.java | 2 +-
.../hadoop/hive/metastore/api/LockResponse.java | 2 +-
.../hive/metastore/api/LongColumnStatsData.java | 2 +-
.../hive/metastore/api/MetaException.java | 2 +-
.../hive/metastore/api/NoSuchLockException.java | 2 +-
.../metastore/api/NoSuchObjectException.java | 2 +-
.../hive/metastore/api/NoSuchTxnException.java | 2 +-
.../hive/metastore/api/NotificationEvent.java | 2 +-
.../metastore/api/NotificationEventRequest.java | 2 +-
.../api/NotificationEventResponse.java | 2 +-
.../hive/metastore/api/OpenTxnRequest.java | 2 +-
.../hive/metastore/api/OpenTxnsResponse.java | 2 +-
.../apache/hadoop/hive/metastore/api/Order.java | 2 +-
.../hadoop/hive/metastore/api/Partition.java | 2 +-
.../api/PartitionListComposingSpec.java | 2 +-
.../hive/metastore/api/PartitionSpec.java | 2 +-
.../api/PartitionSpecWithSharedSD.java | 2 +-
.../hive/metastore/api/PartitionWithoutSD.java | 2 +-
.../metastore/api/PartitionsByExprRequest.java | 2 +-
.../metastore/api/PartitionsByExprResult.java | 2 +-
.../metastore/api/PartitionsStatsRequest.java | 2 +-
.../metastore/api/PartitionsStatsResult.java | 2 +-
.../metastore/api/PrincipalPrivilegeSet.java | 2 +-
.../hadoop/hive/metastore/api/PrivilegeBag.java | 2 +-
.../hive/metastore/api/PrivilegeGrantInfo.java | 2 +-
.../hadoop/hive/metastore/api/ResourceUri.java | 2 +-
.../apache/hadoop/hive/metastore/api/Role.java | 2 +-
.../hive/metastore/api/RolePrincipalGrant.java | 2 +-
.../hadoop/hive/metastore/api/Schema.java | 2 +-
.../hadoop/hive/metastore/api/SerDeInfo.java | 2 +-
.../api/SetPartitionsStatsRequest.java | 2 +-
.../hive/metastore/api/ShowCompactRequest.java | 2 +-
.../hive/metastore/api/ShowCompactResponse.java | 2 +-
.../api/ShowCompactResponseElement.java | 2 +-
.../hive/metastore/api/ShowLocksRequest.java | 2 +-
.../hive/metastore/api/ShowLocksResponse.java | 2 +-
.../metastore/api/ShowLocksResponseElement.java | 2 +-
.../hadoop/hive/metastore/api/SkewedInfo.java | 2 +-
.../hive/metastore/api/StorageDescriptor.java | 2 +-
.../metastore/api/StringColumnStatsData.java | 2 +-
.../apache/hadoop/hive/metastore/api/Table.java | 2 +-
.../hive/metastore/api/TableStatsRequest.java | 2 +-
.../hive/metastore/api/TableStatsResult.java | 2 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 3140 ++--
.../hive/metastore/api/TxnAbortedException.java | 2 +-
.../hadoop/hive/metastore/api/TxnInfo.java | 2 +-
.../hive/metastore/api/TxnOpenException.java | 2 +-
.../apache/hadoop/hive/metastore/api/Type.java | 2 +-
.../hive/metastore/api/UnknownDBException.java | 2 +-
.../api/UnknownPartitionException.java | 2 +-
.../metastore/api/UnknownTableException.java | 2 +-
.../hive/metastore/api/UnlockRequest.java | 2 +-
.../hadoop/hive/metastore/api/Version.java | 2 +-
.../gen-php/metastore/ThriftHiveMetastore.php | 1265 +-
.../src/gen/thrift/gen-php/metastore/Types.php | 103 +
.../hive_metastore/ThriftHiveMetastore-remote | 21 +-
.../hive_metastore/ThriftHiveMetastore.py | 879 +-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 74 +
.../gen/thrift/gen-rb/hive_metastore_types.rb | 16 +
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 60 +
.../hadoop/hive/metastore/HiveAlterHandler.java | 2 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 53 +-
.../hive/metastore/HiveMetaStoreClient.java | 7 +
.../hive/metastore/HouseKeeperService.java | 39 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 4 +
.../hive/metastore/MetaStoreDirectSql.java | 69 +-
.../hadoop/hive/metastore/MetaStoreUtils.java | 17 +-
.../hadoop/hive/metastore/ObjectStore.java | 54 +-
.../apache/hadoop/hive/metastore/RawStore.java | 7 +
.../hive/metastore/RetryingMetaStoreClient.java | 32 +-
.../hive/metastore/tools/HiveMetaTool.java | 5 +
.../hadoop/hive/metastore/txn/TxnHandler.java | 178 +-
.../DummyRawStoreControlledCommit.java | 7 +
.../DummyRawStoreForJdoConnection.java | 6 +
.../metastore/txn/TestCompactionTxnHandler.java | 40 +-
.../hive/metastore/txn/TestTxnHandler.java | 73 +-
packaging/src/main/assembly/bin.xml | 17 +-
pom.xml | 41 +-
ql/pom.xml | 17 +-
.../hadoop/hive/ql/plan/api/Adjacency.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Graph.java | 2 +-
.../hadoop/hive/ql/plan/api/Operator.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Query.java | 2 +-
.../hadoop/hive/ql/plan/api/QueryPlan.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Stage.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Task.java | 2 +-
...tringGroupColumnCompareStringGroupColumn.txt | 112 +-
...gGroupColumnCompareStringGroupScalarBase.txt | 12 +-
...gGroupScalarCompareStringGroupColumnBase.txt | 12 +-
...tringGroupColumnCompareStringGroupColumn.txt | 112 +-
...gGroupColumnCompareStringGroupScalarBase.txt | 12 +-
...gGroupScalarCompareStringGroupColumnBase.txt | 12 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 30 +-
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 11 +-
.../apache/hadoop/hive/ql/exec/ExplainTask.java | 24 +-
.../hadoop/hive/ql/exec/FileSinkOperator.java | 2 +-
.../hadoop/hive/ql/exec/FunctionRegistry.java | 66 +-
.../hadoop/hive/ql/exec/FunctionTask.java | 2 +-
.../hadoop/hive/ql/exec/KeyWrapperFactory.java | 22 +-
.../hadoop/hive/ql/exec/MapJoinOperator.java | 5 +
.../apache/hadoop/hive/ql/exec/MoveTask.java | 30 +-
.../apache/hadoop/hive/ql/exec/Operator.java | 28 +-
.../hadoop/hive/ql/exec/ScriptOperator.java | 85 +-
.../apache/hadoop/hive/ql/exec/StatsTask.java | 13 +-
.../apache/hadoop/hive/ql/exec/Utilities.java | 26 +-
.../hadoop/hive/ql/exec/mr/ExecDriver.java | 30 +-
.../hive/ql/exec/mr/HadoopJobExecHelper.java | 20 +-
.../persistence/BytesBytesMultiHashMap.java | 11 +-
.../persistence/HybridHashTableContainer.java | 70 +-
.../ql/exec/spark/RemoteHiveSparkClient.java | 13 +-
.../spark/status/impl/LocalSparkJobStatus.java | 2 +-
.../spark/status/impl/RemoteSparkJobStatus.java | 2 +-
.../hadoop/hive/ql/exec/tez/DagUtils.java | 3 +
.../hive/ql/exec/tez/HashTableLoader.java | 7 +-
.../hadoop/hive/ql/exec/tez/InPlaceUpdates.java | 65 +
.../hadoop/hive/ql/exec/tez/TezJobMonitor.java | 66 +-
.../hive/ql/exec/tez/TezSessionState.java | 2 +
.../apache/hadoop/hive/ql/exec/tez/TezTask.java | 4 +-
.../ql/exec/vector/VectorHashKeyWrapper.java | 2 +-
.../ql/exec/vector/VectorizationContext.java | 56 +-
.../BRoundWithNumDigitsDoubleToDouble.java | 42 +
.../expressions/CastStringGroupToString.java | 40 +
.../ql/exec/vector/expressions/ColAndCol.java | 34 +-
.../ql/exec/vector/expressions/ColOrCol.java | 42 +-
.../exec/vector/expressions/CuckooSetBytes.java | 2 +-
.../ql/exec/vector/expressions/DecimalUtil.java | 18 +
.../vector/expressions/FilterExprAndExpr.java | 8 +-
.../vector/expressions/FilterExprOrExpr.java | 140 +-
...FuncBRoundWithNumDigitsDecimalToDecimal.java | 40 +
.../FuncRoundWithNumDigitsDecimalToDecimal.java | 14 +-
.../ql/exec/vector/expressions/MathExpr.java | 22 +
.../hive/ql/exec/vector/expressions/NotCol.java | 14 +-
.../ql/exec/vector/expressions/StringExpr.java | 51 +
...VectorMapJoinInnerBigOnlyStringOperator.java | 4 +-
.../VectorMapJoinInnerStringOperator.java | 4 +-
.../VectorMapJoinLeftSemiStringOperator.java | 4 +-
.../VectorMapJoinOuterStringOperator.java | 4 +-
.../apache/hadoop/hive/ql/hooks/ATSHook.java | 9 +-
.../hadoop/hive/ql/hooks/LineageInfo.java | 9 +-
.../hadoop/hive/ql/hooks/LineageLogger.java | 46 +-
.../hive/ql/hooks/PostExecOrcFileDump.java | 120 +
.../ql/hooks/PostExecTezSummaryPrinter.java | 72 +
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 183 +-
.../hive/ql/io/CombineHiveInputFormat.java | 39 -
.../apache/hadoop/hive/ql/io/orc/FileDump.java | 52 +-
.../apache/hadoop/hive/ql/io/orc/OrcConf.java | 2 +-
.../apache/hadoop/hive/ql/io/orc/OrcFile.java | 3 +
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 447 +-
.../apache/hadoop/hive/ql/io/orc/OrcSerde.java | 7 +-
.../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 25 +-
.../apache/hadoop/hive/ql/io/orc/OrcStruct.java | 2 +-
.../apache/hadoop/hive/ql/io/orc/Reader.java | 13 +
.../hadoop/hive/ql/io/orc/ReaderImpl.java | 260 +-
.../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 24 +-
.../hive/ql/io/orc/TreeReaderFactory.java | 18 +-
.../hive/ql/io/parquet/LeafFilterFactory.java | 43 +-
.../hive/ql/io/parquet/ProjectionPusher.java | 3 +-
.../parquet/read/DataWritableReadSupport.java | 10 +-
.../read/ParquetFilterPredicateConverter.java | 35 +-
.../serde/ParquetHiveArrayInspector.java | 12 +
.../ql/io/parquet/timestamp/NanoTimeUtils.java | 23 +-
.../ql/io/rcfile/stats/PartialScanTask.java | 20 +-
.../hive/ql/io/sarg/ConvertAstToSearchArg.java | 21 +-
.../hadoop/hive/ql/lib/DefaultGraphWalker.java | 84 +-
.../hadoop/hive/ql/lib/ForwardWalker.java | 35 +-
.../hadoop/hive/ql/lib/LevelOrderWalker.java | 153 +
.../hadoop/hive/ql/lib/PreOrderOnceWalker.java | 44 +
.../hadoop/hive/ql/lib/PreOrderWalker.java | 2 +-
.../apache/hadoop/hive/ql/lib/RuleRegExp.java | 22 +-
.../hadoop/hive/ql/lockmgr/DbLockManager.java | 12 +-
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 37 +-
.../hadoop/hive/ql/log/HiveEventCounter.java | 135 +
.../apache/hadoop/hive/ql/log/NullAppender.java | 63 +
.../ql/log/PidDailyRollingFileAppender.java | 33 -
.../hive/ql/log/PidFilePatternConverter.java | 62 +
.../apache/hadoop/hive/ql/metadata/Hive.java | 178 +-
.../hadoop/hive/ql/metadata/Partition.java | 2 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 2 +-
.../hadoop/hive/ql/metadata/TableIterable.java | 104 +
.../hadoop/hive/ql/optimizer/ColumnPruner.java | 8 +-
.../ql/optimizer/ColumnPrunerProcFactory.java | 3 +
.../hive/ql/optimizer/ConstantPropagate.java | 12 +-
.../optimizer/ConstantPropagateProcFactory.java | 217 +-
.../hive/ql/optimizer/ConvertJoinMapJoin.java | 22 +-
.../hive/ql/optimizer/GenMapRedUtils.java | 59 +-
.../hive/ql/optimizer/GroupByOptimizer.java | 58 +-
.../ql/optimizer/IdentityProjectRemover.java | 15 +
.../hadoop/hive/ql/optimizer/IndexUtils.java | 13 +-
.../hadoop/hive/ql/optimizer/Optimizer.java | 13 +
.../hive/ql/optimizer/PointLookupOptimizer.java | 378 +
.../ql/optimizer/ReduceSinkMapJoinProc.java | 19 +-
.../ql/optimizer/calcite/HiveRelOptUtil.java | 23 -
.../calcite/reloperators/HiveFilter.java | 2 +-
.../calcite/reloperators/HiveLimit.java | 57 -
.../calcite/reloperators/HiveSort.java | 85 -
.../calcite/reloperators/HiveSortLimit.java | 110 +
.../rules/HiveAggregateProjectMergeRule.java | 151 +
.../rules/HiveJoinProjectTransposeRule.java | 238 +-
.../calcite/rules/HiveRelFieldTrimmer.java | 145 +-
.../calcite/stats/HiveRelMdMemory.java | 9 +-
.../calcite/stats/HiveRelMdParallelism.java | 4 +-
.../calcite/translator/ASTConverter.java | 24 +-
.../calcite/translator/ExprNodeConverter.java | 70 +-
.../calcite/translator/HiveOpConverter.java | 110 +-
.../translator/PlanModifierForASTConv.java | 14 +-
.../translator/PlanModifierForReturnPath.java | 4 -
.../calcite/translator/PlanModifierUtil.java | 4 +-
.../calcite/translator/TypeConverter.java | 13 +-
.../correlation/AbstractCorrelationProcCtx.java | 7 +
.../correlation/CorrelationUtilities.java | 11 +-
.../correlation/ReduceSinkDeDuplication.java | 6 +-
.../ql/optimizer/index/RewriteCanApplyCtx.java | 8 +-
.../ql/optimizer/lineage/ExprProcFactory.java | 9 +-
.../hive/ql/optimizer/lineage/Generator.java | 4 +-
.../hive/ql/optimizer/lineage/LineageCtx.java | 34 +-
.../ql/optimizer/lineage/OpProcFactory.java | 10 +-
.../annotation/AnnotateWithOpTraits.java | 6 +-
.../ql/optimizer/pcr/PcrExprProcFactory.java | 154 +-
.../physical/NullScanTaskDispatcher.java | 6 +-
.../hive/ql/optimizer/physical/Vectorizer.java | 15 +-
.../hive/ql/optimizer/ppr/OpProcFactory.java | 3 +-
.../hive/ql/optimizer/ppr/PartitionPruner.java | 69 +-
.../annotation/AnnotateWithStatistics.java | 6 +-
.../stats/annotation/StatsRulesProcFactory.java | 46 +-
.../ql/optimizer/unionproc/UnionProcessor.java | 10 +-
.../apache/hadoop/hive/ql/parse/ASTNode.java | 139 +-
.../hadoop/hive/ql/parse/CalcitePlanner.java | 73 +-
.../ql/parse/ColumnStatsSemanticAnalyzer.java | 6 +-
.../hive/ql/parse/DDLSemanticAnalyzer.java | 11 +
.../hadoop/hive/ql/parse/GenMapRedWalker.java | 2 +-
.../hadoop/hive/ql/parse/GenTezWorkWalker.java | 2 +-
.../apache/hadoop/hive/ql/parse/HiveParser.g | 2 +
.../hadoop/hive/ql/parse/IdentifiersParser.g | 62 +-
.../hadoop/hive/ql/parse/LeadLagInfo.java | 4 +-
.../hive/ql/parse/LoadSemanticAnalyzer.java | 38 +-
.../apache/hadoop/hive/ql/parse/ParseUtils.java | 2 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 54 +-
.../apache/hadoop/hive/ql/parse/TezWalker.java | 2 +-
.../hive/ql/parse/TypeCheckProcFactory.java | 42 +-
.../hive/ql/parse/spark/GenSparkWorkWalker.java | 2 +-
.../hive/ql/plan/ExprNodeConstantDesc.java | 29 +-
.../hadoop/hive/ql/plan/ExprNodeDesc.java | 23 +-
.../apache/hadoop/hive/ql/plan/FilterDesc.java | 14 +-
.../org/apache/hadoop/hive/ql/plan/MapWork.java | 10 -
.../apache/hadoop/hive/ql/plan/PlanUtils.java | 9 +-
.../hadoop/hive/ql/ppd/ExprWalkerInfo.java | 136 +-
.../hive/ql/ppd/ExprWalkerProcFactory.java | 92 +-
.../hadoop/hive/ql/ppd/OpProcFactory.java | 11 +-
.../ql/ppd/PredicateTransitivePropagate.java | 4 +-
.../hive/ql/ppd/SyntheticJoinPredicate.java | 4 +-
.../hadoop/hive/ql/processors/SetProcessor.java | 4 +
.../sqlstd/SQLStdHiveAccessController.java | 5 +
.../hadoop/hive/ql/session/SessionState.java | 10 +-
.../apache/hadoop/hive/ql/stats/StatsUtils.java | 126 +-
.../hive/ql/txn/AcidHouseKeeperService.java | 104 +
.../hive/ql/txn/compactor/CompactorMR.java | 19 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 10 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 2 +-
.../hive/ql/udf/generic/GenericUDAFMax.java | 16 +-
.../hive/ql/udf/generic/GenericUDAFStd.java | 2 +
.../ql/udf/generic/GenericUDAFVariance.java | 2 +
.../hadoop/hive/ql/udf/generic/GenericUDF.java | 14 +-
.../hive/ql/udf/generic/GenericUDFAesBase.java | 205 +
.../ql/udf/generic/GenericUDFAesDecrypt.java | 50 +
.../ql/udf/generic/GenericUDFAesEncrypt.java | 50 +
.../hive/ql/udf/generic/GenericUDFBRound.java | 68 +
.../ql/udf/generic/GenericUDFBaseNumeric.java | 4 +-
.../hive/ql/udf/generic/GenericUDFBasePad.java | 8 +-
.../hive/ql/udf/generic/GenericUDFIn.java | 14 +-
.../hive/ql/udf/generic/GenericUDFNvl.java | 2 +-
.../hive/ql/udf/generic/GenericUDFOPAnd.java | 63 +-
.../hive/ql/udf/generic/GenericUDFOPEqual.java | 4 +
.../generic/GenericUDFOPEqualOrGreaterThan.java | 4 +
.../generic/GenericUDFOPEqualOrLessThan.java | 4 +
.../ql/udf/generic/GenericUDFOPGreaterThan.java | 4 +
.../ql/udf/generic/GenericUDFOPLessThan.java | 4 +
.../ql/udf/generic/GenericUDFOPNotEqual.java | 5 +
.../ql/udf/generic/GenericUDFOPNotNull.java | 4 +
.../hive/ql/udf/generic/GenericUDFOPNull.java | 4 +
.../hive/ql/udf/generic/GenericUDFOPOr.java | 63 +-
.../ql/udf/generic/GenericUDFParamUtils.java | 8 +-
.../hive/ql/udf/generic/GenericUDFRound.java | 41 +-
.../hive/ql/udf/generic/GenericUDFStruct.java | 25 +-
.../hadoop/hive/ql/udf/generic/RoundUtils.java | 14 +
.../main/resources/hive-exec-log4j.properties | 77 -
ql/src/main/resources/hive-exec-log4j2.xml | 109 +
ql/src/main/resources/tez-container-log4j2.xml | 48 +
.../apache/hadoop/hive/ql/TestTxnCommands.java | 21 +
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 84 +-
.../hadoop/hive/ql/exec/TestOperators.java | 16 +
.../exec/vector/TestVectorizationContext.java | 93 +
.../TestVectorLogicalExpressions.java | 282 +
.../apache/hadoop/hive/ql/io/TestAcidUtils.java | 27 +-
.../hive/ql/io/orc/TestInputOutputFormat.java | 118 +-
.../hadoop/hive/ql/io/orc/TestOrcFile.java | 10 +-
.../hadoop/hive/ql/io/orc/TestOrcStruct.java | 2 +
.../hive/ql/io/orc/TestRecordReaderImpl.java | 42 +-
.../parquet/TestParquetRecordReaderWrapper.java | 50 +-
.../read/TestParquetFilterPredicate.java | 27 +-
.../serde/TestParquetTimestampUtils.java | 38 +-
.../ql/io/sarg/TestConvertAstToSearchArg.java | 128 +-
.../hive/ql/io/sarg/TestSearchArgumentImpl.java | 22 +-
.../hive/ql/lockmgr/TestDbTxnManager.java | 74 +-
.../hive/ql/lockmgr/TestDbTxnManager2.java | 2 +-
.../hadoop/hive/ql/log/TestLog4j2Appenders.java | 95 +
.../hadoop/hive/ql/metadata/StringAppender.java | 128 +
.../hadoop/hive/ql/metadata/TestHive.java | 50 +-
...nMapRedUtilsUsePartitionColumnsNegative.java | 73 +
...nMapRedUtilsUsePartitionColumnsPositive.java | 61 +
.../TestSQL11ReservedKeyWordsNegative.java | 32 +-
.../TestSQL11ReservedKeyWordsPositive.java | 23 +-
.../udf/generic/TestGenericUDFAesDecrypt.java | 233 +
.../udf/generic/TestGenericUDFAesEncrypt.java | 228 +
.../ql/udf/generic/TestGenericUDFBRound.java | 202 +
.../clientnegative/alter_table_wrong_location.q | 4 +
.../queries/clientnegative/ctas_noemptyfolder.q | 10 +
.../queries/clientnegative/load_orc_negative3.q | 6 +
.../clientnegative/mismatch_columns_insertion.q | 4 +
.../queries/clientnegative/nvl_mismatch_type.q | 20 +
.../annotate_stats_deep_filters.q | 67 +
.../clientpositive/authorization_1_sql_std.q | 4 +
.../authorization_set_show_current_role.q | 3 +
.../clientpositive/bucket_map_join_tez1.q | 31 +
.../clientpositive/cast_tinyint_to_double.q | 7 +
.../queries/clientpositive/cbo_rp_auto_join17.q | 14 +
.../cbo_rp_cross_product_check_2.q | 31 +
ql/src/test/queries/clientpositive/cbo_rp_gby.q | 24 +
.../queries/clientpositive/cbo_rp_gby_empty.q | 30 +
.../test/queries/clientpositive/cbo_rp_insert.q | 17 +
.../test/queries/clientpositive/cbo_rp_join.q | 65 +
.../test/queries/clientpositive/cbo_rp_limit.q | 16 +
.../clientpositive/cbo_rp_outer_join_ppr.q | 40 +
.../queries/clientpositive/cbo_rp_semijoin.q | 17 +
.../clientpositive/cbo_rp_simple_select.q | 56 +
.../test/queries/clientpositive/cbo_rp_stats.q | 10 +
.../queries/clientpositive/cbo_rp_subq_exists.q | 67 +
.../queries/clientpositive/cbo_rp_subq_in.q | 56 +
.../queries/clientpositive/cbo_rp_subq_not_in.q | 81 +
.../queries/clientpositive/cbo_rp_udf_udaf.q | 20 +
.../test/queries/clientpositive/cbo_rp_union.q | 14 +
.../test/queries/clientpositive/cbo_rp_views.q | 46 +
.../queries/clientpositive/cbo_rp_windowing.q | 21 +
.../queries/clientpositive/cbo_rp_windowing_2.q | 439 +
ql/src/test/queries/clientpositive/char_udf1.q | 9 +-
.../clientpositive/columnstats_quoting.q | 8 +
.../queries/clientpositive/compustat_avro.q | 8 +-
.../test/queries/clientpositive/create_like.q | 12 +
.../test/queries/clientpositive/dynpart_merge.q | 28 +
.../queries/clientpositive/exchgpartition2lel.q | 32 +
.../queries/clientpositive/flatten_and_or.q | 19 +
.../queries/clientpositive/folder_predicate.q | 32 +
ql/src/test/queries/clientpositive/groupby13.q | 16 +
.../queries/clientpositive/groupby1_map_nomap.q | 2 +
ql/src/test/queries/clientpositive/groupby6.q | 2 +
.../clientpositive/groupby_grouping_id2.q | 2 +
.../clientpositive/groupby_ppr_multi_distinct.q | 2 +
ql/src/test/queries/clientpositive/having2.q | 27 +
.../clientpositive/insertoverwrite_bucket.q | 28 +
ql/src/test/queries/clientpositive/keyword_2.q | 14 +
ql/src/test/queries/clientpositive/lineage3.q | 22 +-
.../clientpositive/load_dyn_part14_win.q | 18 +-
.../test/queries/clientpositive/load_orc_part.q | 4 +
.../queries/clientpositive/macro_duplicate.q | 10 +
.../queries/clientpositive/multi_column_in.q | 71 +
.../test/queries/clientpositive/orc_file_dump.q | 57 +
.../test/queries/clientpositive/orc_ppd_basic.q | 177 +
.../parquet_mixed_partition_formats.q | 42 +
.../clientpositive/parquet_ppd_boolean.q | 35 +
.../queries/clientpositive/parquet_ppd_char.q | 76 +
.../queries/clientpositive/parquet_ppd_date.q | 101 +
.../clientpositive/parquet_ppd_decimal.q | 163 +
.../clientpositive/parquet_ppd_partition.q | 9 +
.../clientpositive/parquet_ppd_timestamp.q | 98 +
.../clientpositive/parquet_ppd_varchar.q | 76 +
.../clientpositive/parquet_predicate_pushdown.q | 301 +-
.../test/queries/clientpositive/pointlookup.q | 59 +
.../test/queries/clientpositive/pointlookup2.q | 51 +
.../test/queries/clientpositive/pointlookup3.q | 41 +
.../queries/clientpositive/ptfgroupbyjoin.q | 61 +
.../queries/clientpositive/selectDistinctStar.q | 2 +
ql/src/test/queries/clientpositive/structin.q | 23 +
.../queries/clientpositive/udf_aes_decrypt.q | 21 +
.../queries/clientpositive/udf_aes_encrypt.q | 21 +
ql/src/test/queries/clientpositive/udf_bround.q | 44 +
.../clientpositive/unionall_unbalancedppd.q | 3 +
.../test/queries/clientpositive/varchar_udf1.q | 6 +-
.../test/queries/clientpositive/vector_bround.q | 14 +
.../clientpositive/vector_cast_constant.q | 4 +-
.../queries/clientpositive/vectorized_casts.q | 6 +
.../queries/clientpositive/windowing_udaf.q | 4 +
.../alter_table_wrong_location.q.out | 9 +
.../clientnegative/char_pad_convert_fail0.q.out | 2 +-
.../clientnegative/char_pad_convert_fail1.q.out | 2 +-
.../clientnegative/char_pad_convert_fail3.q.out | 2 +-
.../clientnegative/ctas_noemptyfolder.q.out | 19 +
.../clientnegative/load_orc_negative3.q.out | 25 +
.../mismatch_columns_insertion.q.out | 9 +
.../clientnegative/nvl_mismatch_type.q.out | 43 +
.../alter_partition_coltype.q.out | 8 +-
.../annotate_stats_deep_filters.q.out | 244 +
.../clientpositive/annotate_stats_filter.q.out | 10 +-
.../clientpositive/annotate_stats_groupby.q.out | 106 +-
.../annotate_stats_groupby2.q.out | 28 +-
.../authorization_1_sql_std.q.out | 11 +
.../authorization_explain.q.java1.7.out | 2 +-
.../authorization_explain.q.java1.8.out | 2 +-
.../authorization_set_show_current_role.q.out | 8 +
.../results/clientpositive/auto_join18.q.out | 12 +-
.../auto_join18_multi_distinct.q.out | 12 +-
.../results/clientpositive/auto_join27.q.out | 18 +-
.../results/clientpositive/auto_join32.q.out | 4 +-
.../clientpositive/binarysortable_1.q.out | Bin 4329 -> 4325 bytes
.../clientpositive/cast_tinyint_to_double.q.out | 38 +
.../clientpositive/cbo_rp_auto_join17.q.out | 118 +
.../cbo_rp_cross_product_check_2.q.out | 699 +
.../results/clientpositive/cbo_rp_gby.q.out | 124 +
.../clientpositive/cbo_rp_gby_empty.q.out | 77 +
.../results/clientpositive/cbo_rp_insert.q.out | 89 +
.../results/clientpositive/cbo_rp_join.q.out | 15028 +++++++++++++++++
.../results/clientpositive/cbo_rp_limit.q.out | 90 +
.../cbo_rp_outer_join_ppr.q.java1.7.out | 855 +
.../clientpositive/cbo_rp_semijoin.q.out | 440 +
.../clientpositive/cbo_rp_simple_select.q.out | 755 +
.../results/clientpositive/cbo_rp_stats.q.out | 14 +
.../clientpositive/cbo_rp_subq_exists.q.out | 297 +
.../results/clientpositive/cbo_rp_subq_in.q.out | 151 +
.../clientpositive/cbo_rp_subq_not_in.q.out | 365 +
.../clientpositive/cbo_rp_udf_udaf.q.out | 125 +
.../results/clientpositive/cbo_rp_union.q.out | 920 +
.../results/clientpositive/cbo_rp_views.q.out | 237 +
.../clientpositive/cbo_rp_windowing.q.out | 293 +
.../clientpositive/cbo_rp_windowing_2.q.out | 2338 +++
.../clientpositive/char_udf1.q.java1.7.out | 22 +-
.../clientpositive/columnstats_quoting.q.out | 114 +
.../results/clientpositive/compustat_avro.q.out | 8 +-
.../clientpositive/constprog_partitioner.q.out | 30 +-
.../clientpositive/convert_enum_to_string.q.out | 9 +-
.../clientpositive/correlationoptimizer10.q.out | 48 +-
.../clientpositive/correlationoptimizer2.q.out | 220 +-
.../clientpositive/correlationoptimizer5.q.out | 6 +-
.../clientpositive/correlationoptimizer6.q.out | 232 +-
ql/src/test/results/clientpositive/count.q.out | 14 +-
.../results/clientpositive/create_like.q.out | 66 +
.../results/clientpositive/ctas_colname.q.out | 52 +-
.../test/results/clientpositive/database.q.out | 2 +-
.../clientpositive/decimal_precision.q.out | 4 +-
.../results/clientpositive/decimal_udf.q.out | 48 +-
.../results/clientpositive/distinct_stats.q.out | 14 +-
.../clientpositive/dynamic_rdd_cache.q.out | 58 +-
.../results/clientpositive/dynpart_merge.q.out | 99 +
.../dynpart_sort_opt_vectorization.q.out | 105 +-
.../dynpart_sort_optimization.q.out | 105 +-
...ryption_select_read_only_encrypted_tbl.q.out | 4 +-
.../clientpositive/exchgpartition2lel.q.out | 182 +
.../clientpositive/explain_dependency.q.out | 18 +-
.../clientpositive/explain_dependency2.q.out | 16 +-
.../clientpositive/explain_logical.q.out | 78 +-
.../clientpositive/fetch_aggregation.q.out | 4 +-
.../clientpositive/filter_cond_pushdown.q.out | 32 +-
.../clientpositive/filter_join_breaktask.q.out | 12 +-
.../results/clientpositive/flatten_and_or.q.out | 66 +
.../test/results/clientpositive/fold_when.q.out | 16 +-
.../clientpositive/folder_predicate.q.out | 368 +
.../test/results/clientpositive/gby_star.q.out | 54 +-
.../test/results/clientpositive/groupby12.q.out | 6 +-
.../test/results/clientpositive/groupby13.q.out | 86 +
.../clientpositive/groupby1_map_nomap.q.out | 8 +-
.../results/clientpositive/groupby5_map.q.out | 4 +-
.../clientpositive/groupby5_map_skew.q.out | 4 +-
.../test/results/clientpositive/groupby6.q.out | 8 +-
.../results/clientpositive/groupby_cube1.q.out | 12 +-
.../groupby_distinct_samekey.q.out | 6 +-
.../clientpositive/groupby_duplicate_key.q.out | 16 +-
.../clientpositive/groupby_grouping_id2.q.out | 28 +-
.../clientpositive/groupby_grouping_sets2.q.out | 10 +-
.../clientpositive/groupby_grouping_sets3.q.out | 12 +-
.../clientpositive/groupby_grouping_sets5.q.out | 8 +-
.../clientpositive/groupby_grouping_sets6.q.out | 8 +-
.../groupby_multi_single_reducer2.q.out | 2 +-
.../groupby_multi_single_reducer3.q.out | 12 +-
.../clientpositive/groupby_position.q.out | 36 +-
.../groupby_ppr_multi_distinct.q.out | 8 +-
.../clientpositive/groupby_resolution.q.out | 60 +-
.../clientpositive/groupby_rollup1.q.out | 12 +-
.../clientpositive/groupby_sort_10.q.out | 8 +-
.../clientpositive/groupby_sort_11.q.out | 10 +-
.../clientpositive/groupby_sort_1_23.q.out | 56 +-
.../results/clientpositive/groupby_sort_8.q.out | 12 +-
.../clientpositive/groupby_sort_skew_1_23.q.out | 56 +-
ql/src/test/results/clientpositive/having.q.out | 62 +-
.../test/results/clientpositive/having2.q.out | 365 +-
.../clientpositive/index_auto_mult_tables.q.out | 12 +-
.../clientpositive/index_auto_self_join.q.out | 12 +-
.../clientpositive/index_auto_update.q.out | 6 +-
.../index_bitmap_auto_partitioned.q.out | 6 +-
.../index_bitmap_compression.q.out | 6 +-
.../infer_bucket_sort_dyn_part.q.out | 4 +-
.../infer_bucket_sort_map_operators.q.out | 4 +-
.../clientpositive/infer_const_type.q.out | 7 +-
.../results/clientpositive/input4.q.java1.7.out | 2 +-
.../results/clientpositive/input4.q.java1.8.out | 2 +-
.../clientpositive/input_testxpath2.q.out | 2 +-
.../clientpositive/input_testxpath4.q.out | 2 +-
.../clientpositive/insertoverwrite_bucket.q.out | 104 +
.../results/clientpositive/join0.q.java1.7.out | 2 +-
.../results/clientpositive/join0.q.java1.8.out | 4 +-
ql/src/test/results/clientpositive/join18.q.out | 12 +-
.../clientpositive/join18_multi_distinct.q.out | 12 +-
ql/src/test/results/clientpositive/join31.q.out | 36 +-
ql/src/test/results/clientpositive/join32.q.out | 2 +-
.../clientpositive/join32_lessSize.q.out | 6 +-
ql/src/test/results/clientpositive/join33.q.out | 2 +-
.../join_cond_pushdown_unqual4.q.out | 2 +-
.../test/results/clientpositive/keyword_2.q.out | 51 +
.../limit_partition_metadataonly.q.out | 4 +-
.../results/clientpositive/limit_pushdown.q.out | 36 +-
.../test/results/clientpositive/lineage2.q.out | 12 +-
.../test/results/clientpositive/lineage3.q.out | 79 +-
.../list_bucket_dml_6.q.java1.7.out | 12 +-
.../list_bucket_dml_6.q.java1.8.out | 12 +-
.../clientpositive/list_bucket_dml_7.q.out | 12 +-
.../list_bucket_query_multiskew_3.q.out | 2 +-
.../list_bucket_query_oneskew_3.q.out | 6 +-
.../clientpositive/load_dyn_part14_win.q.out | 167 +-
.../results/clientpositive/load_orc_part.q.out | 18 +
.../clientpositive/macro_duplicate.q.out | 56 +
.../clientpositive/mapjoin_mapjoin.q.out | 32 +-
.../clientpositive/metadata_only_queries.q.out | 4 +-
.../results/clientpositive/metadataonly1.q.out | 112 +-
.../results/clientpositive/multiMapJoin2.q.out | 226 +-
.../clientpositive/multi_column_in.q.out | 410 +
.../results/clientpositive/multi_insert.q.out | 8 +-
.../clientpositive/multi_insert_gby.q.out | 2 +-
.../multi_insert_lateral_view.q.out | 4 +-
...i_insert_move_tasks_share_dependencies.q.out | 360 +-
.../nonblock_op_deduplicate.q.out | 8 +-
.../results/clientpositive/nonmr_fetch.q.out | 14 +-
.../test/results/clientpositive/null_cast.q.out | 6 +-
.../results/clientpositive/orc_file_dump.q.out | 447 +
.../clientpositive/orc_predicate_pushdown.q.out | 36 +-
.../results/clientpositive/parallel_join0.q.out | 2 +-
.../parquet_mixed_partition_formats.q.out | 303 +
.../clientpositive/parquet_ppd_boolean.q.out | 270 +
.../clientpositive/parquet_ppd_char.q.out | 308 +
.../clientpositive/parquet_ppd_date.q.out | 435 +
.../clientpositive/parquet_ppd_decimal.q.out | 768 +
.../clientpositive/parquet_ppd_partition.q.out | 47 +
.../clientpositive/parquet_ppd_timestamp.q.out | 422 +
.../clientpositive/parquet_ppd_varchar.q.out | 308 +
.../parquet_predicate_pushdown.q.out | 1307 +-
.../clientpositive/partition_multilevels.q.out | 8 +-
.../clientpositive/plan_json.q.java1.7.out | 2 +-
.../clientpositive/plan_json.q.java1.8.out | 2 +-
.../results/clientpositive/pointlookup.q.out | 198 +
.../results/clientpositive/pointlookup2.q.out | 1647 ++
.../results/clientpositive/pointlookup3.q.out | 1394 ++
.../test/results/clientpositive/ppd_gby.q.out | 12 +-
.../test/results/clientpositive/ppd_gby2.q.out | 60 +-
.../results/clientpositive/ppd_gby_join.q.out | 4 +-
.../test/results/clientpositive/ppd_join.q.out | 4 +-
.../test/results/clientpositive/ppd_join2.q.out | 22 +-
.../test/results/clientpositive/ppd_join3.q.out | 52 +-
.../clientpositive/ppd_join_filter.q.out | 98 +-
.../clientpositive/ppd_outer_join4.q.out | 2 +-
ql/src/test/results/clientpositive/ptf.q.out | 27 +-
.../results/clientpositive/ptfgroupbyjoin.q.out | 519 +
.../ql_rewrite_gbtoidx_cbo_1.q.out | 168 +-
.../ql_rewrite_gbtoidx_cbo_2.q.out | 94 +-
.../clientpositive/rand_partitionpruner3.q.out | 12 +-
.../reduce_deduplicate_extended.q.out | 32 +-
.../clientpositive/selectDistinctStar.q.out | 52 +-
.../clientpositive/select_unquote_not.q.out | 8 +-
.../results/clientpositive/show_functions.q.out | 3 +
.../clientpositive/spark/auto_join18.q.out | 10 +-
.../spark/auto_join18_multi_distinct.q.out | 12 +-
.../clientpositive/spark/auto_join27.q.out | 18 +-
.../clientpositive/spark/auto_join32.q.out | 53 +-
.../spark/bucket_map_join_tez1.q.out | 357 +
.../spark/constprog_partitioner.q.out | 30 +-
.../results/clientpositive/spark/count.q.out | 14 +-
.../spark/dynamic_rdd_cache.q.out | 6 +-
.../spark/filter_join_breaktask.q.out | 12 +-
.../spark/groupby1_map_nomap.q.out | 564 +-
.../clientpositive/spark/groupby5_map.q.out | 4 +-
.../spark/groupby5_map_skew.q.out | 4 +-
.../results/clientpositive/spark/groupby6.q.out | 20 +-
.../clientpositive/spark/groupby_cube1.q.out | 12 +-
.../spark/groupby_grouping_id2.q.out | 38 +-
.../spark/groupby_multi_single_reducer2.q.out | 2 +-
.../spark/groupby_multi_single_reducer3.q.out | 12 +-
.../clientpositive/spark/groupby_position.q.out | 18 +-
.../spark/groupby_ppr_multi_distinct.q.out | 16 +-
.../spark/groupby_resolution.q.out | 60 +-
.../clientpositive/spark/groupby_rollup1.q.out | 12 +-
.../spark/groupby_sort_1_23.q.out | 90 +-
.../spark/groupby_sort_skew_1_23.q.out | 90 +-
.../results/clientpositive/spark/having.q.out | 62 +-
.../spark/infer_bucket_sort_map_operators.q.out | 4 +-
.../results/clientpositive/spark/join18.q.out | 10 +-
.../spark/join18_multi_distinct.q.out | 12 +-
.../results/clientpositive/spark/join31.q.out | 36 +-
.../results/clientpositive/spark/join32.q.out | 2 +-
.../clientpositive/spark/join32_lessSize.q.out | 6 +-
.../results/clientpositive/spark/join33.q.out | 2 +-
.../spark/join_cond_pushdown_unqual4.q.out | 2 +-
.../spark/limit_partition_metadataonly.q.out | 4 +-
.../clientpositive/spark/limit_pushdown.q.out | 34 +-
.../clientpositive/spark/mapjoin_mapjoin.q.out | 24 +-
.../spark/metadata_only_queries.q.out | 4 +-
.../clientpositive/spark/multi_insert.q.out | 8 +-
.../clientpositive/spark/multi_insert_gby.q.out | 2 +-
.../spark/multi_insert_lateral_view.q.out | 4 +-
...i_insert_move_tasks_share_dependencies.q.out | 536 +-
.../clientpositive/spark/ppd_gby_join.q.out | 4 +-
.../results/clientpositive/spark/ppd_join.q.out | 4 +-
.../clientpositive/spark/ppd_join2.q.out | 22 +-
.../clientpositive/spark/ppd_join3.q.out | 52 +-
.../clientpositive/spark/ppd_join_filter.q.out | 90 +-
.../clientpositive/spark/ppd_outer_join4.q.out | 2 +-
.../test/results/clientpositive/spark/ptf.q.out | 17 +-
.../spark/ql_rewrite_gbtoidx_cbo_1.q.out | 168 +-
.../clientpositive/spark/stats_only_null.q.out | 8 +-
.../clientpositive/spark/subquery_exists.q.out | 12 +-
.../clientpositive/spark/subquery_in.q.out | 72 +-
.../spark/subquery_multiinsert.q.java1.7.out | 16 +-
.../results/clientpositive/spark/union11.q.out | 42 +-
.../results/clientpositive/spark/union14.q.out | 28 +-
.../results/clientpositive/spark/union15.q.out | 28 +-
.../results/clientpositive/spark/union28.q.out | 4 +-
.../results/clientpositive/spark/union30.q.out | 4 +-
.../results/clientpositive/spark/union33.q.out | 8 +-
.../results/clientpositive/spark/union5.q.out | 34 +-
.../results/clientpositive/spark/union7.q.out | 28 +-
.../clientpositive/spark/union_remove_21.q.out | 4 +-
.../spark/union_remove_6_subq.q.out | 22 +-
.../spark/vector_cast_constant.q.java1.7.out | 41 +-
.../spark/vector_count_distinct.q.out | 4 +-
.../spark/vector_decimal_aggregate.q.out | 12 +-
.../spark/vector_distinct_2.q.out | 28 +-
.../clientpositive/spark/vector_groupby_3.q.out | 30 +-
.../spark/vector_mapjoin_reduce.q.out | 58 +-
.../clientpositive/spark/vector_orderby_5.q.out | 6 +-
.../clientpositive/spark/vectorization_0.q.out | 18 +-
.../clientpositive/spark/vectorization_13.q.out | 36 +-
.../clientpositive/spark/vectorization_15.q.out | 18 +-
.../clientpositive/spark/vectorization_16.q.out | 16 +-
.../clientpositive/spark/vectorization_17.q.out | 12 +-
.../clientpositive/spark/vectorization_9.q.out | 16 +-
.../spark/vectorization_pushdown.q.out | 4 +-
.../spark/vectorization_short_regress.q.out | 114 +-
.../clientpositive/spark/vectorized_case.q.out | 2 +-
.../spark/vectorized_nested_mapjoin.q.out | 18 +-
.../clientpositive/spark/vectorized_ptf.q.out | 21 +-
.../spark/vectorized_timestamp_funcs.q.out | 12 +-
.../clientpositive/stats_only_null.q.out | 8 +-
.../results/clientpositive/stats_ppr_all.q.out | 16 +-
.../test/results/clientpositive/structin.q.out | 110 +
.../subq_where_serialization.q.out | 18 +-
.../clientpositive/subquery_exists.q.out | 12 +-
.../clientpositive/subquery_exists_having.q.out | 48 +-
.../results/clientpositive/subquery_in.q.out | 72 +-
.../clientpositive/subquery_in_having.q.out | 310 +-
.../subquery_multiinsert.q.java1.7.out | 16 +-
.../clientpositive/subquery_notexists.q.out | 18 +-
.../subquery_notexists_having.q.out | 26 +-
.../results/clientpositive/subquery_notin.q.out | 24 +-
.../subquery_notin_having.q.java1.7.out | 50 +-
.../subquery_unqualcolumnrefs.q.out | 100 +-
.../results/clientpositive/subquery_views.q.out | 40 +-
.../tez/bucket_map_join_tez1.q.out | 333 +
.../clientpositive/tez/constprog_dpp.q.out | 4 +-
.../test/results/clientpositive/tez/count.q.out | 14 +-
.../tez/dynamic_partition_pruning.q.out | 88 +-
.../tez/dynpart_sort_opt_vectorization.q.out | 90 +-
.../tez/dynpart_sort_optimization.q.out | 89 +-
.../clientpositive/tez/explainuser_1.q.out | 3083 ++--
.../clientpositive/tez/explainuser_2.q.out | 4016 ++---
.../clientpositive/tez/explainuser_3.q.out | 10 +-
.../tez/filter_join_breaktask.q.out | 12 +-
.../results/clientpositive/tez/having.q.out | 62 +-
.../clientpositive/tez/limit_pushdown.q.out | 34 +-
.../clientpositive/tez/mapjoin_mapjoin.q.out | 24 +-
.../tez/metadata_only_queries.q.out | 4 +-
.../clientpositive/tez/metadataonly1.q.out | 44 +-
.../test/results/clientpositive/tez/mrr.q.out | 94 +-
.../clientpositive/tez/orc_ppd_basic.q.out | 701 +
.../test/results/clientpositive/tez/ptf.q.out | 15 +-
.../clientpositive/tez/selectDistinctStar.q.out | 52 +-
.../clientpositive/tez/stats_only_null.q.out | 8 +-
.../clientpositive/tez/subquery_exists.q.out | 12 +-
.../clientpositive/tez/subquery_in.q.out | 72 +-
.../results/clientpositive/tez/tez_dml.q.out | 6 +-
.../results/clientpositive/tez/union5.q.out | 44 +-
.../results/clientpositive/tez/union7.q.out | 28 +-
.../clientpositive/tez/unionDistinct_1.q.out | 8 +-
.../clientpositive/tez/vector_aggregate_9.q.out | 4 +-
.../tez/vector_binary_join_groupby.q.out | 4 +-
.../tez/vector_cast_constant.q.java1.7.out | 21 +-
.../tez/vector_char_mapjoin1.q.out | 1 +
.../tez/vector_count_distinct.q.out | 4 +-
.../tez/vector_decimal_aggregate.q.out | 12 +-
.../tez/vector_decimal_precision.q.out | 4 +-
.../clientpositive/tez/vector_decimal_udf.q.out | 54 +-
.../clientpositive/tez/vector_distinct_2.q.out | 28 +-
.../clientpositive/tez/vector_groupby_3.q.out | 30 +-
.../tez/vector_groupby_reduce.q.out | 8 +-
.../tez/vector_grouping_sets.q.out | 8 +-
.../clientpositive/tez/vector_inner_join.q.out | 14 +-
.../tez/vector_mapjoin_reduce.q.out | 60 +-
.../tez/vector_mr_diff_schema_alias.q.out | 2 +-
.../clientpositive/tez/vector_orderby_5.q.out | 6 +-
.../clientpositive/tez/vector_outer_join2.q.out | 20 +-
.../tez/vector_partition_diff_num_cols.q.out | 20 +-
.../tez/vector_partitioned_date_time.q.out | 12 +-
.../tez/vector_reduce_groupby_decimal.q.out | 24 +-
.../tez/vector_varchar_mapjoin1.q.out | 1 +
.../clientpositive/tez/vectorization_0.q.out | 18 +-
.../clientpositive/tez/vectorization_13.q.out | 36 +-
.../clientpositive/tez/vectorization_15.q.out | 18 +-
.../clientpositive/tez/vectorization_16.q.out | 16 +-
.../clientpositive/tez/vectorization_17.q.out | 12 +-
.../clientpositive/tez/vectorization_7.q.out | 4 +-
.../clientpositive/tez/vectorization_8.q.out | 4 +-
.../clientpositive/tez/vectorization_9.q.out | 16 +-
.../tez/vectorization_limit.q.out | 14 +-
.../tez/vectorization_pushdown.q.out | 4 +-
.../tez/vectorization_short_regress.q.out | 114 +-
.../clientpositive/tez/vectorized_case.q.out | 2 +-
.../clientpositive/tez/vectorized_casts.q.out | 99 +-
.../tez/vectorized_distinct_gby.q.out | 8 +-
.../vectorized_dynamic_partition_pruning.q.out | 88 +-
.../tez/vectorized_nested_mapjoin.q.out | 18 +-
.../clientpositive/tez/vectorized_parquet.q.out | 6 +-
.../clientpositive/tez/vectorized_ptf.q.out | 19 +-
.../tez/vectorized_timestamp_funcs.q.out | 12 +-
ql/src/test/results/clientpositive/udf8.q.out | 4 +-
.../clientpositive/udf_aes_decrypt.q.out | 79 +
.../clientpositive/udf_aes_encrypt.q.out | 79 +
.../results/clientpositive/udf_bround.q.out | 119 +
.../test/results/clientpositive/udf_count.q.out | 16 +-
.../results/clientpositive/udf_inline.q.out | 8 +-
.../clientpositive/udf_isnull_isnotnull.q.out | 2 +-
ql/src/test/results/clientpositive/udf_or.q.out | 4 +-
.../test/results/clientpositive/udf_size.q.out | 2 +-
.../results/clientpositive/udf_struct.q.out | 2 +-
.../test/results/clientpositive/udf_union.q.out | 2 +-
.../test/results/clientpositive/union11.q.out | 70 +-
.../test/results/clientpositive/union14.q.out | 32 +-
.../test/results/clientpositive/union15.q.out | 38 +-
.../test/results/clientpositive/union28.q.out | 8 +-
.../test/results/clientpositive/union30.q.out | 8 +-
.../test/results/clientpositive/union33.q.out | 8 +-
ql/src/test/results/clientpositive/union5.q.out | 48 +-
ql/src/test/results/clientpositive/union7.q.out | 32 +-
.../clientpositive/unionDistinct_1.q.out | 8 +-
.../clientpositive/union_remove_21.q.out | 8 +-
.../clientpositive/union_remove_6_subq.q.out | 34 +-
.../clientpositive/unionall_unbalancedppd.q.out | 14 +-
.../clientpositive/varchar_udf1.q.java1.7.out | 12 +-
.../clientpositive/vector_aggregate_9.q.out | 4 +-
.../vector_aggregate_without_gby.q.out | 4 +-
.../vector_binary_join_groupby.q.out | 4 +-
.../results/clientpositive/vector_bround.q.out | 86 +
.../vector_cast_constant.q.java1.7.out | 32 +-
.../clientpositive/vector_char_mapjoin1.q.out | 1 +
.../clientpositive/vector_count_distinct.q.out | 6 +-
.../vector_decimal_aggregate.q.out | 12 +-
.../vector_decimal_precision.q.out | 4 +-
.../clientpositive/vector_decimal_udf.q.out | 54 +-
.../clientpositive/vector_distinct_2.q.out | 28 +-
.../clientpositive/vector_groupby_3.q.out | 30 +-
.../clientpositive/vector_groupby_reduce.q.out | 8 +-
.../clientpositive/vector_grouping_sets.q.out | 8 +-
.../clientpositive/vector_inner_join.q.out | 12 +-
.../clientpositive/vector_left_outer_join.q.out | 8 +-
.../clientpositive/vector_mapjoin_reduce.q.out | 62 +-
.../vector_mr_diff_schema_alias.q.out | 2 +-
.../clientpositive/vector_orderby_5.q.out | 6 +-
.../clientpositive/vector_outer_join1.q.out | 8 +-
.../clientpositive/vector_outer_join2.q.out | 28 +-
.../clientpositive/vector_outer_join3.q.out | 24 +-
.../clientpositive/vector_outer_join4.q.out | 8 +-
.../clientpositive/vector_outer_join5.q.out | 48 +-
.../vector_partition_diff_num_cols.q.out | 20 +-
.../vector_partitioned_date_time.q.out | 12 +-
.../vector_reduce_groupby_decimal.q.out | 24 +-
.../vector_varchar_mapjoin1.q.out | 1 +
.../clientpositive/vectorization_0.q.out | 18 +-
.../clientpositive/vectorization_13.q.out | 36 +-
.../clientpositive/vectorization_15.q.out | 18 +-
.../clientpositive/vectorization_16.q.out | 16 +-
.../clientpositive/vectorization_17.q.out | 12 +-
.../clientpositive/vectorization_7.q.out | 4 +-
.../clientpositive/vectorization_8.q.out | 4 +-
.../clientpositive/vectorization_9.q.out | 16 +-
.../clientpositive/vectorization_limit.q.out | 16 +-
.../clientpositive/vectorization_pushdown.q.out | 4 +-
.../vectorization_short_regress.q.out | 114 +-
.../clientpositive/vectorized_case.q.out | 2 +-
.../clientpositive/vectorized_casts.q.out | 66 +-
.../vectorized_distinct_gby.q.out | 12 +-
.../vectorized_nested_mapjoin.q.out | 26 +-
.../clientpositive/vectorized_parquet.q.out | 6 +-
.../vectorized_parquet_types.q.out | 6 +-
.../results/clientpositive/vectorized_ptf.q.out | 67 +-
.../vectorized_timestamp_funcs.q.out | 12 +-
.../results/clientpositive/windowing_udaf.q.out | 12 +
.../hadoop/hive/serde/test/InnerStruct.java | 2 +-
.../hadoop/hive/serde/test/ThriftTestObj.java | 2 +-
.../hadoop/hive/serde2/thrift/test/Complex.java | 2 +-
.../hive/serde2/thrift/test/IntString.java | 2 +-
.../hive/serde2/thrift/test/MegaStruct.java | 2 +-
.../hive/serde2/thrift/test/MiniStruct.java | 2 +-
.../hive/serde2/thrift/test/SetIntString.java | 2 +-
.../hive/serde2/ColumnProjectionUtils.java | 22 +
.../apache/hadoop/hive/serde2/WriteBuffers.java | 12 +-
.../serde2/avro/AvroLazyObjectInspector.java | 19 +-
.../hadoop/hive/serde2/avro/InstanceCache.java | 17 +-
.../hadoop/hive/serde2/lazy/LazyByte.java | 4 +
.../hadoop/hive/serde2/lazy/LazyDouble.java | 4 +
.../hadoop/hive/serde2/lazy/LazyFloat.java | 4 +
.../hadoop/hive/serde2/lazy/LazyInteger.java | 4 +
.../hadoop/hive/serde2/lazy/LazyLong.java | 4 +
.../hadoop/hive/serde2/lazy/LazyShort.java | 4 +
.../hadoop/hive/serde2/lazy/LazyUtils.java | 28 +
.../ObjectInspectorConverters.java | 31 +-
.../objectinspector/ObjectInspectorFactory.java | 68 +-
.../objectinspector/ObjectInspectorUtils.java | 19 +
.../ReflectionStructObjectInspector.java | 60 +-
.../SettableUnionObjectInspector.java | 4 +-
.../StandardConstantStructObjectInspector.java | 51 +
.../StandardStructObjectInspector.java | 7 +-
.../StandardUnionObjectInspector.java | 4 +-
.../ThriftUnionObjectInspector.java | 44 +-
.../hive/serde2/typeinfo/TypeInfoUtils.java | 2 +-
.../avro/TestAvroLazyObjectInspector.java | 59 +
.../TestObjectInspectorConverters.java | 89 +-
.../TestReflectionObjectInspectors.java | 71 +-
.../TestThriftObjectInspectors.java | 10 +-
.../hadoop/hive/service/HiveClusterStatus.java | 2 +-
.../hive/service/HiveServerException.java | 2 +-
.../apache/hadoop/hive/service/ThriftHive.java | 2 +-
.../service/cli/thrift/TArrayTypeEntry.java | 2 +-
.../hive/service/cli/thrift/TBinaryColumn.java | 2 +-
.../hive/service/cli/thrift/TBoolColumn.java | 2 +-
.../hive/service/cli/thrift/TBoolValue.java | 2 +-
.../hive/service/cli/thrift/TByteColumn.java | 2 +-
.../hive/service/cli/thrift/TByteValue.java | 2 +-
.../hive/service/cli/thrift/TCLIService.java | 2 +-
.../cli/thrift/TCancelDelegationTokenReq.java | 2 +-
.../cli/thrift/TCancelDelegationTokenResp.java | 2 +-
.../service/cli/thrift/TCancelOperationReq.java | 2 +-
.../cli/thrift/TCancelOperationResp.java | 2 +-
.../service/cli/thrift/TCloseOperationReq.java | 2 +-
.../service/cli/thrift/TCloseOperationResp.java | 2 +-
.../service/cli/thrift/TCloseSessionReq.java | 2 +-
.../service/cli/thrift/TCloseSessionResp.java | 2 +-
.../hive/service/cli/thrift/TColumnDesc.java | 2 +-
.../hive/service/cli/thrift/TDoubleColumn.java | 2 +-
.../hive/service/cli/thrift/TDoubleValue.java | 2 +-
.../cli/thrift/TExecuteStatementReq.java | 2 +-
.../cli/thrift/TExecuteStatementResp.java | 2 +-
.../service/cli/thrift/TFetchResultsReq.java | 2 +-
.../service/cli/thrift/TFetchResultsResp.java | 2 +-
.../service/cli/thrift/TGetCatalogsReq.java | 2 +-
.../service/cli/thrift/TGetCatalogsResp.java | 2 +-
.../hive/service/cli/thrift/TGetColumnsReq.java | 2 +-
.../service/cli/thrift/TGetColumnsResp.java | 2 +-
.../cli/thrift/TGetDelegationTokenReq.java | 2 +-
.../cli/thrift/TGetDelegationTokenResp.java | 2 +-
.../service/cli/thrift/TGetFunctionsReq.java | 2 +-
.../service/cli/thrift/TGetFunctionsResp.java | 2 +-
.../hive/service/cli/thrift/TGetInfoReq.java | 2 +-
.../hive/service/cli/thrift/TGetInfoResp.java | 2 +-
.../cli/thrift/TGetOperationStatusReq.java | 2 +-
.../cli/thrift/TGetOperationStatusResp.java | 2 +-
.../cli/thrift/TGetResultSetMetadataReq.java | 2 +-
.../cli/thrift/TGetResultSetMetadataResp.java | 2 +-
.../hive/service/cli/thrift/TGetSchemasReq.java | 2 +-
.../service/cli/thrift/TGetSchemasResp.java | 2 +-
.../service/cli/thrift/TGetTableTypesReq.java | 2 +-
.../service/cli/thrift/TGetTableTypesResp.java | 2 +-
.../hive/service/cli/thrift/TGetTablesReq.java | 2 +-
.../hive/service/cli/thrift/TGetTablesResp.java | 2 +-
.../service/cli/thrift/TGetTypeInfoReq.java | 2 +-
.../service/cli/thrift/TGetTypeInfoResp.java | 2 +-
.../service/cli/thrift/THandleIdentifier.java | 2 +-
.../hive/service/cli/thrift/TI16Column.java | 2 +-
.../hive/service/cli/thrift/TI16Value.java | 2 +-
.../hive/service/cli/thrift/TI32Column.java | 2 +-
.../hive/service/cli/thrift/TI32Value.java | 2 +-
.../hive/service/cli/thrift/TI64Column.java | 2 +-
.../hive/service/cli/thrift/TI64Value.java | 2 +-
.../hive/service/cli/thrift/TMapTypeEntry.java | 2 +-
.../service/cli/thrift/TOpenSessionReq.java | 2 +-
.../service/cli/thrift/TOpenSessionResp.java | 2 +-
.../service/cli/thrift/TOperationHandle.java | 2 +-
.../service/cli/thrift/TPrimitiveTypeEntry.java | 2 +-
.../cli/thrift/TRenewDelegationTokenReq.java | 2 +-
.../cli/thrift/TRenewDelegationTokenResp.java | 2 +-
.../apache/hive/service/cli/thrift/TRow.java | 2 +-
.../apache/hive/service/cli/thrift/TRowSet.java | 2 +-
.../hive/service/cli/thrift/TSessionHandle.java | 2 +-
.../apache/hive/service/cli/thrift/TStatus.java | 2 +-
.../hive/service/cli/thrift/TStringColumn.java | 2 +-
.../hive/service/cli/thrift/TStringValue.java | 2 +-
.../service/cli/thrift/TStructTypeEntry.java | 2 +-
.../hive/service/cli/thrift/TTableSchema.java | 2 +-
.../hive/service/cli/thrift/TTypeDesc.java | 2 +-
.../service/cli/thrift/TTypeQualifiers.java | 2 +-
.../service/cli/thrift/TUnionTypeEntry.java | 2 +-
.../cli/thrift/TUserDefinedTypeEntry.java | 2 +-
.../gen-py/hive_service/ThriftHive-remote | 21 +-
.../hive/service/cli/CLIServiceUtils.java | 7 -
.../org/apache/hive/service/cli/Column.java | 2 +-
.../cli/operation/GetColumnsOperation.java | 10 +-
.../cli/operation/GetTablesOperation.java | 7 +-
.../cli/operation/LogDivertAppender.java | 223 +-
.../service/cli/operation/OperationManager.java | 17 +-
.../service/cli/session/SessionManager.java | 42 +-
.../thrift/RetryingThriftCLIServiceClient.java | 331 +
.../apache/hive/service/server/HiveServer2.java | 74 +-
.../org/apache/hive/service/cli/TestColumn.java | 129 +
.../cli/TestRetryingThriftCLIServiceClient.java | 133 +
.../session/TestPluggableHiveSessionImpl.java | 55 +
shims/0.20S/pom.xml | 8 +-
.../hadoop/hive/shims/Hadoop20SShims.java | 46 +-
shims/0.23/pom.xml | 21 +-
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 145 +-
shims/common/pom.xml | 17 +-
.../apache/hadoop/hive/shims/HadoopShims.java | 37 +
.../hadoop/hive/shims/HadoopShimsSecure.java | 32 +
.../hadoop/hive/shims/HiveEventCounter.java | 102 -
.../hive/spark/client/SparkClientImpl.java | 5 +-
.../src/test/resources/log4j.properties | 23 -
spark-client/src/test/resources/log4j2.xml | 39 +
storage-api/pom.xml | 7 -
.../hadoop/hive/common/type/HiveDecimal.java | 1 +
.../hadoop/hive/ql/io/sarg/PredicateLeaf.java | 3 +-
.../hive/ql/io/sarg/SearchArgumentFactory.java | 5 +-
.../hive/ql/io/sarg/SearchArgumentImpl.java | 7 +-
testutils/ptest2/pom.xml | 32 +-
.../ptest2/src/main/resources/log4j.properties | 37 -
testutils/ptest2/src/main/resources/log4j2.xml | 79 +
1119 files changed, 67101 insertions(+), 16669 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/70eeadd2/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --cc common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 73610dc,7f29da2..4ce21a3
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@@ -765,8 -765,10 +765,10 @@@ public class HiveConf extends Configura
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
- "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
- "because memory-optimized hashtable cannot be serialized."),
+ "Whether Hive should use memory-optimized hash table for MapJoin.\n" +
+ "Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
+ HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
+ (float) 0.5, "Probing space percentage of the optimized hashtable"),
HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid" +
"grace hash join as the join method for mapjoin. Tez only."),
HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
http://git-wip-us.apache.org/repos/asf/hive/blob/70eeadd2/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/70eeadd2/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 4f66cd6,2e3bd76..1064bd2
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@@ -155,9 -155,7 +155,8 @@@ public class Vectorizer implements Phys
Set<String> supportedAggregationUdfs = new HashSet<String>();
- private PhysicalContext physicalContext = null;
private HiveConf hiveConf;
+ private boolean isSpark;
public Vectorizer() {
[22/23] hive git commit: HIVE-12434: Merge branch 'spark' to master
Posted by xu...@apache.org.
HIVE-12434: Merge branch 'spark' to master
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/de1b22ff
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/de1b22ff
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/de1b22ff
Branch: refs/heads/master
Commit: de1b22ff18de42c166d3e5f6550dc4404d3f1040
Parents: 22499db 50b62ca
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Nov 18 13:38:52 2015 -0800
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Nov 18 13:38:52 2015 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 4 +-
.../src/test/templates/TestHBaseCliDriver.vm | 63 +-
.../templates/TestHBaseNegativeCliDriver.vm | 64 +-
.../test/resources/testconfiguration.properties | 20 +-
.../hadoop/hive/hbase/HBaseTestSetup.java | 9 +-
pom.xml | 12 +-
.../hadoop/hive/ql/exec/ScriptOperator.java | 15 +
.../persistence/MapJoinTableContainerSerDe.java | 70 +
.../hive/ql/exec/spark/HashTableLoader.java | 18 +-
.../ql/exec/spark/RemoteHiveSparkClient.java | 4 +-
.../hive/ql/exec/spark/SparkPlanGenerator.java | 17 +
.../hive/ql/exec/spark/SparkUtilities.java | 10 +-
.../spark/status/impl/JobMetricsListener.java | 89 +-
.../mapjoin/VectorMapJoinCommonOperator.java | 4 +-
.../fast/VectorMapJoinFastTableContainer.java | 2 +-
.../hive/ql/optimizer/physical/Vectorizer.java | 4 +-
.../optimizer/spark/SparkMapJoinOptimizer.java | 10 +
.../ql/parse/spark/GenSparkProcContext.java | 2 -
.../hive/ql/parse/spark/GenSparkWork.java | 7 -
ql/src/test/queries/clientpositive/orc_merge1.q | 2 +
ql/src/test/queries/clientpositive/orc_merge2.q | 1 +
ql/src/test/queries/clientpositive/orc_merge3.q | 1 +
ql/src/test/queries/clientpositive/orc_merge4.q | 2 +
ql/src/test/queries/clientpositive/orc_merge5.q | 3 +
ql/src/test/queries/clientpositive/orc_merge6.q | 3 +
ql/src/test/queries/clientpositive/orc_merge7.q | 3 +
ql/src/test/queries/clientpositive/orc_merge8.q | 2 +
ql/src/test/queries/clientpositive/orc_merge9.q | 1 +
.../clientpositive/orc_merge_incompat1.q | 1 +
.../clientpositive/orc_merge_incompat2.q | 1 +
.../clientpositive/spark/orc_merge1.q.out | 485 ++++++
.../clientpositive/spark/orc_merge2.q.out | 268 ++++
.../clientpositive/spark/orc_merge3.q.out | 207 +++
.../clientpositive/spark/orc_merge4.q.out | 231 +++
.../clientpositive/spark/orc_merge5.q.out | 334 +++++
.../clientpositive/spark/orc_merge6.q.out | 508 +++++++
.../clientpositive/spark/orc_merge7.q.out | 619 ++++++++
.../clientpositive/spark/orc_merge8.q.out | 130 ++
.../clientpositive/spark/orc_merge9.q.out | 186 +++
.../spark/orc_merge_incompat1.q.out | 240 +++
.../spark/orc_merge_incompat2.q.out | 370 +++++
.../spark/vector_inner_join.q.out | 853 +++++++++++
.../spark/vector_outer_join0.q.out | 242 +++
.../spark/vector_outer_join1.q.out | 631 ++++++++
.../spark/vector_outer_join2.q.out | 323 ++++
.../spark/vector_outer_join3.q.out | 630 ++++++++
.../spark/vector_outer_join4.q.out | 1000 +++++++++++++
.../spark/vector_outer_join5.q.out | 1406 ++++++++++++++++++
ql/src/test/templates/TestCliDriver.vm | 74 +-
ql/src/test/templates/TestCompareCliDriver.vm | 71 +-
ql/src/test/templates/TestNegativeCliDriver.vm | 70 +-
ql/src/test/templates/TestParseNegative.vm | 65 +-
.../apache/hive/spark/client/JobContext.java | 4 +-
.../hive/spark/client/JobContextImpl.java | 8 +-
.../apache/hive/spark/client/RemoteDriver.java | 60 +-
.../hive/spark/client/SparkClientImpl.java | 2 +-
.../hive/spark/client/SparkClientUtilities.java | 25 +-
.../ptest2/src/main/resources/batch-exec.vm | 2 -
58 files changed, 9000 insertions(+), 488 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/de1b22ff/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/de1b22ff/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/de1b22ff/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/de1b22ff/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
[18/23] hive git commit: HIVE-12229: Custom script in query cannot be
executed in yarn-cluster mode [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
HIVE-12229: Custom script in query cannot be executed in yarn-cluster mode [Spark Branch] (Rui reviewed by Xuefu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b02cd4ab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b02cd4ab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b02cd4ab
Branch: refs/heads/master
Commit: b02cd4abce10003dc90646b710875fba00b9b5b0
Parents: fd11929
Author: Rui Li <ru...@intel.com>
Authored: Thu Nov 5 16:48:25 2015 +0800
Committer: Rui Li <ru...@intel.com>
Committed: Thu Nov 5 16:51:22 2015 +0800
----------------------------------------------------------------------
.../hadoop/hive/ql/exec/ScriptOperator.java | 15 ++++++++++++
.../ql/exec/spark/RemoteHiveSparkClient.java | 4 ++--
.../hive/ql/exec/spark/SparkUtilities.java | 10 ++++----
.../apache/hive/spark/client/JobContext.java | 4 ++--
.../hive/spark/client/JobContextImpl.java | 8 +++----
.../hive/spark/client/SparkClientImpl.java | 2 +-
.../hive/spark/client/SparkClientUtilities.java | 24 +++++++++++++-------
7 files changed, 43 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 5df9ea2..63837ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
+import org.apache.spark.SparkConf;
+import org.apache.spark.SparkEnv;
import org.apache.spark.SparkFiles;
import java.io.BufferedInputStream;
@@ -329,6 +331,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
// initialize the user's process only when you receive the first row
if (firstRow) {
firstRow = false;
+ SparkConf sparkConf = null;
try {
String[] cmdArgs = splitArgs(conf.getScriptCmd());
@@ -341,6 +344,7 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
// In spark local mode, we need to search added files in root directory.
if (HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+ sparkConf = SparkEnv.get().conf();
finder.prependPathComponent(SparkFiles.getRootDirectory());
}
File f = finder.getAbsolutePath(prog);
@@ -371,6 +375,17 @@ public class ScriptOperator extends Operator<ScriptDesc> implements
String idEnvVarVal = getOperatorId();
env.put(safeEnvVarName(idEnvVarName), idEnvVarVal);
+ // For spark, in non-local mode, any added dependencies are stored at
+ // SparkFiles::getRootDirectory, which is the executor's working directory.
+ // In local mode, we need to manually point the process's working directory to it,
+ // in order to make the dependencies accessible.
+ if (sparkConf != null) {
+ String master = sparkConf.get("spark.master");
+ if (master.equals("local") || master.startsWith("local[")) {
+ pb.directory(new File(SparkFiles.getRootDirectory()));
+ }
+ }
+
scriptPid = pb.start(); // Runtime.getRuntime().exec(wrappedCmdArgs);
DataOutputStream scriptOut = new DataOutputStream(
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
index 2e8d1d3..cf81424 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
@@ -295,11 +295,11 @@ public class RemoteHiveSparkClient implements HiveSparkClient {
// Add jar to current thread class loader dynamically, and add jar paths to JobConf as Spark
// may need to load classes from this jar in other threads.
- Set<String> addedJars = jc.getAddedJars();
+ Map<String, Long> addedJars = jc.getAddedJars();
if (addedJars != null && !addedJars.isEmpty()) {
SparkClientUtilities.addToClassPath(addedJars, localJobConf, jc.getLocalTmpDir());
KryoSerializer.setClassLoader(Thread.currentThread().getContextClassLoader());
- localJobConf.set(Utilities.HIVE_ADDED_JARS, StringUtils.join(addedJars, ";"));
+ localJobConf.set(Utilities.HIVE_ADDED_JARS, StringUtils.join(addedJars.keySet(), ";"));
}
Path localScratchDir = KryoSerializer.deserialize(scratchDirBytes, Path.class);
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
index cf2c3bc..0268469 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java
@@ -22,8 +22,6 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
-import java.util.UUID;
-import java.util.Collection;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FilenameUtils;
@@ -91,11 +89,11 @@ public class SparkUtilities {
*/
public static URI uploadToHDFS(URI source, HiveConf conf) throws IOException {
Path localFile = new Path(source.getPath());
- // give the uploaded file a UUID
- Path remoteFile = new Path(SessionState.getHDFSSessionPath(conf),
- UUID.randomUUID() + "-" + getFileName(source));
+ Path remoteFile = new Path(SessionState.getHDFSSessionPath(conf), getFileName(source));
FileSystem fileSystem = FileSystem.get(conf);
- fileSystem.copyFromLocalFile(localFile, remoteFile);
+ // Overwrite if the remote file already exists. Whether the file can be added
+ // on executor is up to spark, i.e. spark.files.overwrite
+ fileSystem.copyFromLocalFile(false, true, localFile, remoteFile);
Path fullPath = fileSystem.getFileStatus(remoteFile).getPath();
return fullPath.toUri();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/spark-client/src/main/java/org/apache/hive/spark/client/JobContext.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/JobContext.java b/spark-client/src/main/java/org/apache/hive/spark/client/JobContext.java
index af6332e..c9c975b 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/JobContext.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/JobContext.java
@@ -55,9 +55,9 @@ public interface JobContext {
Map<String, List<JavaFutureAction<?>>> getMonitoredJobs();
/**
- * Return all added jar path which added through AddJarJob.
+ * Return all added jar path and timestamp which added through AddJarJob.
*/
- Set<String> getAddedJars();
+ Map<String, Long> getAddedJars();
/**
* Returns a local tmp dir specific to the context
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/spark-client/src/main/java/org/apache/hive/spark/client/JobContextImpl.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/JobContextImpl.java b/spark-client/src/main/java/org/apache/hive/spark/client/JobContextImpl.java
index beed8a3..b73bcd7 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/JobContextImpl.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/JobContextImpl.java
@@ -18,12 +18,10 @@
package org.apache.hive.spark.client;
import java.io.File;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hive.spark.counter.SparkCounters;
@@ -35,14 +33,14 @@ class JobContextImpl implements JobContext {
private final JavaSparkContext sc;
private final ThreadLocal<MonitorCallback> monitorCb;
private final Map<String, List<JavaFutureAction<?>>> monitoredJobs;
- private final Set<String> addedJars;
+ private final Map<String, Long> addedJars;
private final File localTmpDir;
public JobContextImpl(JavaSparkContext sc, File localTmpDir) {
this.sc = sc;
this.monitorCb = new ThreadLocal<MonitorCallback>();
monitoredJobs = new ConcurrentHashMap<String, List<JavaFutureAction<?>>>();
- addedJars = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
+ addedJars = new ConcurrentHashMap<>();
this.localTmpDir = localTmpDir;
}
@@ -65,7 +63,7 @@ class JobContextImpl implements JobContext {
}
@Override
- public Set<String> getAddedJars() {
+ public Map<String, Long> getAddedJars() {
return addedJars;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
index ceebbb3..3d682a0 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
@@ -617,7 +617,7 @@ class SparkClientImpl implements SparkClient {
jc.sc().addJar(path);
// Following remote job may refer to classes in this jar, and the remote job would be executed
// in a different thread, so we add this jar path to JobContext for further usage.
- jc.getAddedJars().add(path);
+ jc.getAddedJars().put(path, System.currentTimeMillis());
return null;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b02cd4ab/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
index 589436d..bbbd97b 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
@@ -24,7 +24,8 @@ import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.List;
-import java.util.Set;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
@@ -35,20 +36,21 @@ import org.apache.hadoop.fs.Path;
public class SparkClientUtilities {
protected static final transient Log LOG = LogFactory.getLog(SparkClientUtilities.class);
+ private static final Map<String, Long> downloadedFiles = new ConcurrentHashMap<>();
/**
* Add new elements to the classpath.
*
- * @param newPaths Set of classpath elements
+ * @param newPaths Map of classpath elements and corresponding timestamp
*/
- public static void addToClassPath(Set<String> newPaths, Configuration conf, File localTmpDir)
+ public static void addToClassPath(Map<String, Long> newPaths, Configuration conf, File localTmpDir)
throws Exception {
URLClassLoader loader = (URLClassLoader) Thread.currentThread().getContextClassLoader();
List<URL> curPath = Lists.newArrayList(loader.getURLs());
boolean newPathAdded = false;
- for (String newPath : newPaths) {
- URL newUrl = urlFromPathString(newPath, conf, localTmpDir);
+ for (Map.Entry<String, Long> entry : newPaths.entrySet()) {
+ URL newUrl = urlFromPathString(entry.getKey(), entry.getValue(), conf, localTmpDir);
if (newUrl != null && !curPath.contains(newUrl)) {
curPath.add(newUrl);
LOG.info("Added jar[" + newUrl + "] to classpath.");
@@ -69,7 +71,8 @@ public class SparkClientUtilities {
* @param path path string
* @return
*/
- private static URL urlFromPathString(String path, Configuration conf, File localTmpDir) {
+ private static URL urlFromPathString(String path, Long timeStamp,
+ Configuration conf, File localTmpDir) {
URL url = null;
try {
if (StringUtils.indexOf(path, "file:/") == 0) {
@@ -78,12 +81,17 @@ public class SparkClientUtilities {
Path remoteFile = new Path(path);
Path localFile =
new Path(localTmpDir.getAbsolutePath() + File.separator + remoteFile.getName());
- if (!new File(localFile.toString()).exists()) {
+ Long currentTS = downloadedFiles.get(path);
+ if (currentTS == null) {
+ currentTS = -1L;
+ }
+ if (!new File(localFile.toString()).exists() || currentTS < timeStamp) {
LOG.info("Copying " + remoteFile + " to " + localFile);
FileSystem remoteFS = remoteFile.getFileSystem(conf);
remoteFS.copyToLocalFile(remoteFile, localFile);
+ downloadedFiles.put(path, timeStamp);
}
- return urlFromPathString(localFile.toString(), conf, localTmpDir);
+ return urlFromPathString(localFile.toString(), timeStamp, conf, localTmpDir);
} else {
url = new File(path).toURL();
}
[07/23] hive git commit: Merge branch 'master' into spark
Posted by xu...@apache.org.
Merge branch 'master' into spark
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f78f6635
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f78f6635
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f78f6635
Branch: refs/heads/master
Commit: f78f66359cdbd7963c3bdfbc65663010f3531719
Parents: a8c49ef 2519915
Author: xzhang <xz...@xzdt>
Authored: Wed Sep 16 10:00:19 2015 -0700
Committer: xzhang <xz...@xzdt>
Committed: Wed Sep 16 10:00:19 2015 -0700
----------------------------------------------------------------------
ql/pom.xml | 1 +
.../read/ParquetFilterPredicateConverter.java | 148 ++++++++++
.../read/ParquetRecordReaderWrapper.java | 122 +-------
.../parquet/TestParquetRecordReaderWrapper.java | 14 +-
.../read/TestParquetFilterPredicate.java | 51 ++++
.../ql/io/sarg/TestConvertAstToSearchArg.java | 25 +-
.../clientpositive/parquet_predicate_pushdown.q | 9 +
.../clientpositive/unionall_unbalancedppd.q | 72 +++++
.../parquet_predicate_pushdown.q.out | 47 ++++
.../clientpositive/unionall_unbalancedppd.q.out | 280 +++++++++++++++++++
10 files changed, 636 insertions(+), 133 deletions(-)
----------------------------------------------------------------------
[15/23] hive git commit: HIVE-12284: Merge branch 'master' into spark
Posted by xu...@apache.org.
HIVE-12284: Merge branch 'master' into spark
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c9073aad
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c9073aad
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c9073aad
Branch: refs/heads/master
Commit: c9073aadc00b01372b85522e777acaea997d5936
Parents: 51f257a 3e21a6d
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Oct 28 05:08:53 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Oct 28 05:08:53 2015 -0700
----------------------------------------------------------------------
LICENSE | 34 +-
NOTICE | 3 +
accumulo-handler/pom.xml | 61 +-
.../hive/accumulo/HiveAccumuloHelper.java | 55 +-
.../mr/HiveAccumuloTableOutputFormat.java | 50 +-
.../hive/accumulo/TestHiveAccumuloHelper.java | 69 +-
.../mr/TestHiveAccumuloTableOutputFormat.java | 86 +-
.../apache/hadoop/hive/ant/GenVectorCode.java | 61 +-
.../apache/hadoop/hive/ant/QTestGenTask.java | 13 +
beeline/pom.xml | 44 +-
.../java/org/apache/hive/beeline/BeeLine.java | 304 +-
.../org/apache/hive/beeline/BeeLineOpts.java | 52 +-
.../hive/beeline/ClientCommandHookFactory.java | 85 +
.../org/apache/hive/beeline/ClientHook.java | 33 +
.../java/org/apache/hive/beeline/Commands.java | 492 +-
.../apache/hive/beeline/DatabaseConnection.java | 4 +-
.../apache/hive/beeline/HiveSchemaHelper.java | 4 +-
.../org/apache/hive/beeline/HiveSchemaTool.java | 22 +-
.../hive/beeline/cli/CliOptionsProcessor.java | 104 +
.../org/apache/hive/beeline/cli/HiveCli.java | 37 +
.../hive/beeline/TestBeelineArgParsing.java | 1 +
.../beeline/TestClientCommandHookFactory.java | 32 +
.../apache/hive/beeline/cli/TestHiveCli.java | 289 +
beeline/src/test/resources/hive-site.xml | 37 +
bin/beeline | 10 +
bin/ext/cli.cmd | 39 +-
bin/ext/cli.sh | 35 +-
bin/ext/hbaseimport.cmd | 35 +
bin/ext/hbaseimport.sh | 27 +
bin/ext/hbaseschematool.sh | 27 +
bin/ext/llap.sh | 49 +
bin/ext/util/execHiveCmd.sh | 21 +-
bin/hive | 8 +-
cli/pom.xml | 63 +-
.../org/apache/hadoop/hive/cli/CliDriver.java | 33 +-
common/pom.xml | 41 +-
.../hadoop/hive/common/CallableWithNdc.java | 44 +
.../hadoop/hive/common/CompressionUtils.java | 131 +
.../apache/hadoop/hive/common/DiskRange.java | 99 -
.../hadoop/hive/common/DiskRangeInfo.java | 59 +
.../hadoop/hive/common/DiskRangeList.java | 205 -
.../apache/hadoop/hive/common/FileUtils.java | 38 +
.../apache/hadoop/hive/common/ObjectPair.java | 5 +
.../hadoop/hive/common/RunnableWithNdc.java | 43 +
.../hadoop/hive/common/StatsSetupConst.java | 16 -
.../common/metrics/common/MetricsConstant.java | 22 +-
.../org/apache/hadoop/hive/conf/HiveConf.java | 253 +-
.../hadoop/hive/conf/HiveVariableSource.java | 24 +
.../hadoop/hive/conf/VariableSubstitution.java | 70 +
.../apache/hadoop/hive/ql/log/PerfLogger.java | 196 +
.../apache/hive/common/util/BloomFilter.java | 20 +-
.../hive/common/util/FixedSizedObjectPool.java | 315 +
.../apache/hive/common/util/ReflectionUtil.java | 2 +-
.../hive/common/util/ShutdownHookManager.java | 52 +-
common/src/main/resources/hive-log4j.properties | 88 +
common/src/main/resources/hive-log4j2.xml | 4 +-
.../hive/common/type/TestHiveDecimal.java | 12 +-
.../apache/hadoop/hive/conf/TestHiveConf.java | 25 +
.../hive/conf/TestVariableSubstitution.java | 63 +
.../common/util/TestFixedSizedObjectPool.java | 246 +
.../common/util/TestShutdownHookManager.java | 22 +-
contrib/pom.xml | 44 +-
.../hive/contrib/serde2/MultiDelimitSerDe.java | 1 +
data/conf/hive-log4j2.xml | 27 +
data/conf/hive-site.xml | 45 +-
data/conf/llap/hive-site.xml | 266 +
data/conf/llap/llap-daemon-site.xml | 57 +
data/conf/llap/tez-site.xml | 6 +
data/conf/spark/standalone/hive-site.xml | 6 -
data/conf/spark/yarn-client/hive-site.xml | 6 -
data/conf/tez/hive-site.xml | 26 +-
data/conf/tez/tez-site.xml | 6 +
data/files/decimal_1_1.txt | 30 +
data/files/escape_crlf.txt | 2 +
data/files/identity_udf.jar | Bin 0 -> 710 bytes
data/files/mapNull.txt | 1 +
data/files/sample2.json | 2 +
errata.txt | 68 +
hbase-handler/pom.xml | 246 +-
.../apache/hadoop/hive/hbase/HBaseSerDe.java | 1 +
.../hadoop/hive/hbase/HBaseSerDeHelper.java | 21 +-
.../hadoop/hive/hbase/HBaseSerDeParameters.java | 20 +-
.../hadoop/hive/hbase/HBaseStatsAggregator.java | 128 -
.../hadoop/hive/hbase/HBaseStatsPublisher.java | 154 -
.../hive/hbase/HBaseStatsSetupConstants.java | 34 -
.../hadoop/hive/hbase/HBaseStatsUtils.java | 135 -
.../hive/hbase/HiveHBaseTableInputFormat.java | 105 +-
.../hive/hbase/HiveHBaseTableOutputFormat.java | 9 +
.../HiveHBaseTableSnapshotInputFormat.java | 21 +-
.../hbase/struct/AvroHBaseValueFactory.java | 3 +-
.../hadoop/hive/hbase/TestHBaseSerDe.java | 12 +-
.../queries/positive/hbase_handler_snapshot.q | 4 +
.../src/test/queries/positive/hbase_queries.q | 16 +
.../src/test/queries/positive/hbase_stats.q | 30 -
.../src/test/queries/positive/hbase_stats2.q | 31 -
.../positive/hbase_stats_empty_partition.q | 13 -
.../positive/hbase_handler_snapshot.q.out | 22 +
.../test/results/positive/hbase_queries.q.out | 99 +
.../src/test/results/positive/hbase_stats.q.out | 311 -
.../test/results/positive/hbase_stats2.q.out | 311 -
.../positive/hbase_stats_empty_partition.q.out | 63 -
hcatalog/conf/proto-hive-site.xml | 2 +-
hcatalog/core/pom.xml | 194 +-
.../apache/hive/hcatalog/data/JsonSerDe.java | 8 +-
.../hive/hcatalog/data/TestJsonSerDe.java | 36 +
hcatalog/hcatalog-pig-adapter/pom.xml | 160 +-
.../apache/hive/hcatalog/pig/HCatLoader.java | 9 +
.../hive/hcatalog/pig/TestHCatLoader.java | 72 +-
.../hcatalog/pig/TestHCatLoaderEncryption.java | 64 +-
hcatalog/pom.xml | 65 +-
hcatalog/server-extensions/pom.xml | 29 +-
.../listener/TestNotificationListener.java | 4 +-
.../templates/conf/hive-site.xml.template | 2 +-
hcatalog/streaming/pom.xml | 47 +-
.../streaming/AbstractRecordWriter.java | 93 +-
.../streaming/DelimitedInputWriter.java | 54 +-
.../hive/hcatalog/streaming/HiveEndPoint.java | 21 +
.../hive/hcatalog/streaming/InvalidTable.java | 8 +
.../hcatalog/streaming/StrictJsonWriter.java | 46 +-
.../mutate/worker/BucketIdResolverImpl.java | 16 +-
.../hive/hcatalog/streaming/TestStreaming.java | 764 +-
.../mutate/worker/TestBucketIdResolverImpl.java | 2 +-
hcatalog/webhcat/java-client/pom.xml | 39 +-
.../hive/hcatalog/api/TestHCatClient.java | 39 +-
hcatalog/webhcat/svr/pom.xml | 60 +-
.../hive/hcatalog/templeton/AppConfig.java | 21 +
.../apache/hive/hcatalog/templeton/Server.java | 19 +-
.../hcatalog/templeton/tool/LaunchMapper.java | 7 +-
hplsql/pom.xml | 31 +-
.../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 70 +-
.../main/java/org/apache/hive/hplsql/Cmp.java | 314 +
.../java/org/apache/hive/hplsql/Column.java | 29 +-
.../main/java/org/apache/hive/hplsql/Conn.java | 21 +
.../main/java/org/apache/hive/hplsql/Copy.java | 50 +-
.../main/java/org/apache/hive/hplsql/Exec.java | 66 +-
.../java/org/apache/hive/hplsql/Expression.java | 33 +-
.../main/java/org/apache/hive/hplsql/File.java | 18 +-
.../main/java/org/apache/hive/hplsql/Meta.java | 28 +-
.../main/java/org/apache/hive/hplsql/Query.java | 18 +
.../java/org/apache/hive/hplsql/Select.java | 23 +-
.../main/java/org/apache/hive/hplsql/Stmt.java | 8 +-
.../main/java/org/apache/hive/hplsql/Var.java | 110 +-
.../apache/hive/hplsql/functions/Function.java | 6 +-
.../hive/hplsql/functions/FunctionMisc.java | 121 +
.../org/apache/hive/hplsql/TestHplsqlLocal.java | 18 +
.../apache/hive/hplsql/TestHplsqlOffline.java | 5 +
hplsql/src/test/queries/db/cmp_row_count.sql | 4 +
hplsql/src/test/queries/db/cmp_sum.sql | 3 +
hplsql/src/test/queries/db/copy_to_file.sql | 2 +
hplsql/src/test/queries/db/copy_to_hdfs.sql | 2 +
hplsql/src/test/queries/db/copy_to_table.sql | 2 +
hplsql/src/test/queries/db/part_count.sql | 17 +
hplsql/src/test/queries/db/part_count_by.sql | 4 +
hplsql/src/test/queries/db/schema.sql | 32 +
hplsql/src/test/queries/db/select_into.sql | 20 +-
hplsql/src/test/queries/db/select_into2.sql | 17 +
.../test/queries/local/create_procedure2.sql | 16 +
hplsql/src/test/queries/local/if2.sql | 5 +
hplsql/src/test/queries/local/include.sql | 2 +
hplsql/src/test/queries/local/include_file.sql | 1 +
hplsql/src/test/queries/local/mult_div.sql | 8 +
hplsql/src/test/queries/offline/select_db2.sql | 5 +
.../src/test/results/db/cmp_row_count.out.txt | 12 +
hplsql/src/test/results/db/cmp_sum.out.txt | 320 +
hplsql/src/test/results/db/copy_to_file.out.txt | 6 +
hplsql/src/test/results/db/copy_to_hdfs.out.txt | 4 +
.../src/test/results/db/copy_to_table.out.txt | 2 +
hplsql/src/test/results/db/part_count.out.txt | 15 +
.../src/test/results/db/part_count_by.out.txt | 13 +
hplsql/src/test/results/db/select_into.out.txt | 58 +-
hplsql/src/test/results/db/select_into2.out.txt | 19 +
.../results/local/create_procedure2.out.txt | 10 +
hplsql/src/test/results/local/if2.out.txt | 4 +
hplsql/src/test/results/local/include.out.txt | 8 +
hplsql/src/test/results/local/mult_div.out.txt | 7 +
.../src/test/results/offline/select_db2.out.txt | 6 +
hwi/pom.xml | 61 +-
itests/custom-serde/pom.xml | 31 +-
itests/hcatalog-unit/pom.xml | 389 +-
itests/hive-jmh/pom.xml | 38 +-
.../vectorization/VectorizationBench.java | 198 +-
itests/hive-minikdc/pom.xml | 181 +-
itests/hive-unit-hadoop2/pom.xml | 12 +-
.../hive/thrift/TestHadoop20SAuthBridge.java | 420 -
.../hive/thrift/TestHadoopAuthBridge23.java | 422 +
itests/hive-unit/pom.xml | 323 +-
.../org/apache/hive/jdbc/miniHS2/MiniHS2.java | 14 +-
.../hadoop/hive/metastore/TestAdminUser.java | 4 +-
.../hive/metastore/TestHiveMetaStore.java | 66 +
.../metastore/hbase/HBaseIntegrationTests.java | 111 +
.../TestHBaseAggrStatsCacheIntegration.java | 691 +
.../hive/metastore/hbase/TestHBaseImport.java | 650 +
.../metastore/hbase/TestHBaseMetastoreSql.java | 223 +
.../hbase/TestHBaseStoreIntegration.java | 1796 +
.../hbase/TestStorageDescriptorSharing.java | 191 +
.../hadoop/hive/ql/TestLocationQueries.java | 2 +-
.../hadoop/hive/ql/history/TestHiveHistory.java | 2 +-
.../hive/ql/security/FolderPermissionBase.java | 63 +-
.../TestClientSideAuthorizationProvider.java | 9 +
...ageBasedClientSideAuthorizationProvider.java | 6 +
...StorageBasedMetastoreAuthorizationReads.java | 7 +-
.../hive/ql/txn/compactor/TestCompactor.java | 13 +-
.../hive/beeline/TestBeeLineWithArgs.java | 21 +
.../org/apache/hive/jdbc/TestJdbcDriver2.java | 108 +-
.../apache/hive/jdbc/TestJdbcWithMiniHS2.java | 153 +-
.../test/java/org/apache/hive/jdbc/TestSSL.java | 44 +-
.../hive/jdbc/cbo_rp_TestJdbcDriver2.java | 2419 ++
.../apache/hive/jdbc/miniHS2/TestMiniHS2.java | 8 +-
itests/pom.xml | 9 +-
itests/qtest-accumulo/pom.xml | 404 +-
itests/qtest-spark/pom.xml | 38 +-
itests/qtest/pom.xml | 545 +-
.../test/resources/testconfiguration.properties | 83 +-
itests/test-serde/pom.xml | 32 +-
itests/util/pom.xml | 131 +-
.../hadoop/hive/hbase/HBaseQTestUtil.java | 8 +-
.../metastore/hbase/HBaseStoreTestUtil.java | 45 +
.../org/apache/hadoop/hive/ql/QTestUtil.java | 153 +-
.../hive/ql/stats/DummyStatsAggregator.java | 18 +-
.../hive/ql/stats/DummyStatsPublisher.java | 15 +-
.../ql/stats/KeyVerifyingStatsAggregator.java | 13 +-
jdbc/pom.xml | 29 +-
.../org/apache/hive/jdbc/HiveConnection.java | 63 +-
.../apache/hive/jdbc/HivePreparedStatement.java | 4 +-
.../hive/jdbc/ZooKeeperHiveClientHelper.java | 34 +-
llap-client/pom.xml | 147 +
.../llap/configuration/LlapConfiguration.java | 176 +
.../apache/hadoop/hive/llap/io/api/LlapIo.java | 27 +
.../hadoop/hive/llap/io/api/LlapIoProxy.java | 78 +
llap-server/bin/llap-daemon-env.sh | 44 +
llap-server/bin/llapDaemon.sh | 152 +
llap-server/bin/runLlapDaemon.sh | 130 +
llap-server/pom.xml | 272 +
.../daemon/rpc/LlapDaemonProtocolProtos.java | 12674 ++++++
.../hadoop/hive/llap/ConsumerFeedback.java | 33 +
.../llap/IncrementalObjectSizeEstimator.java | 628 +
.../org/apache/hadoop/hive/llap/LlapNodeId.java | 86 +
.../hadoop/hive/llap/cache/BuddyAllocator.java | 547 +
.../hive/llap/cache/BuddyAllocatorMXBean.java | 62 +
.../apache/hadoop/hive/llap/cache/Cache.java | 27 +
.../hive/llap/cache/EvictionAwareAllocator.java | 30 +
.../hive/llap/cache/EvictionDispatcher.java | 52 +
.../hive/llap/cache/EvictionListener.java | 23 +
.../hive/llap/cache/LlapCacheableBuffer.java | 56 +
.../hadoop/hive/llap/cache/LlapDataBuffer.java | 142 +
.../hive/llap/cache/LlapOomDebugDump.java | 23 +
.../hadoop/hive/llap/cache/LowLevelCache.java | 76 +
.../hive/llap/cache/LowLevelCacheCounters.java | 26 +
.../hive/llap/cache/LowLevelCacheImpl.java | 540 +
.../llap/cache/LowLevelCacheMemoryManager.java | 111 +
.../hive/llap/cache/LowLevelCachePolicy.java | 30 +
.../llap/cache/LowLevelFifoCachePolicy.java | 116 +
.../llap/cache/LowLevelLrfuCachePolicy.java | 432 +
.../hadoop/hive/llap/cache/MemoryManager.java | 25 +
.../hadoop/hive/llap/cache/NoopCache.java | 33 +
.../hive/llap/cli/LlapOptionsProcessor.java | 184 +
.../hadoop/hive/llap/cli/LlapServiceDriver.java | 283 +
.../llap/counters/QueryFragmentCounters.java | 143 +
.../hive/llap/daemon/ContainerRunner.java | 34 +
.../daemon/FinishableStateUpdateHandler.java | 21 +
.../llap/daemon/FragmentCompletionHandler.java | 22 +
.../hadoop/hive/llap/daemon/HistoryLogger.java | 154 +
.../hive/llap/daemon/KilledTaskHandler.java | 29 +
.../daemon/LlapDaemonProtocolBlockingPB.java | 22 +
.../hive/llap/daemon/QueryFailedHandler.java | 20 +
.../hive/llap/daemon/impl/AMReporter.java | 474 +
.../llap/daemon/impl/ContainerRunnerImpl.java | 353 +
.../impl/EvictingPriorityBlockingQueue.java | 76 +
.../hive/llap/daemon/impl/LlapDaemon.java | 435 +
.../hive/llap/daemon/impl/LlapDaemonMXBean.java | 83 +
.../impl/LlapDaemonProtocolClientImpl.java | 126 +
.../impl/LlapDaemonProtocolServerImpl.java | 155 +
.../hive/llap/daemon/impl/LlapTaskReporter.java | 451 +
.../llap/daemon/impl/PriorityBlockingDeque.java | 767 +
.../hive/llap/daemon/impl/QueryFileCleaner.java | 94 +
.../llap/daemon/impl/QueryFragmentInfo.java | 181 +
.../hadoop/hive/llap/daemon/impl/QueryInfo.java | 252 +
.../hive/llap/daemon/impl/QueryTracker.java | 242 +
.../hadoop/hive/llap/daemon/impl/Scheduler.java | 42 +
.../llap/daemon/impl/TaskExecutorService.java | 715 +
.../llap/daemon/impl/TaskRunnerCallable.java | 491 +
.../comparator/FirstInFirstOutComparator.java | 81 +
.../comparator/ShortestJobFirstComparator.java | 70 +
.../llap/daemon/registry/ServiceInstance.java | 73 +
.../daemon/registry/ServiceInstanceSet.java | 57 +
.../llap/daemon/registry/ServiceRegistry.java | 59 +
.../registry/impl/LlapFixedRegistryImpl.java | 222 +
.../registry/impl/LlapRegistryService.java | 86 +
.../registry/impl/LlapYarnRegistryImpl.java | 383 +
.../llap/daemon/services/impl/LlapWebApp.java | 12 +
.../daemon/services/impl/LlapWebServices.java | 53 +
.../llap/io/api/impl/ColumnVectorBatch.java | 46 +
.../hive/llap/io/api/impl/LlapInputFormat.java | 322 +
.../hive/llap/io/api/impl/LlapIoImpl.java | 152 +
.../llap/io/decode/ColumnVectorProducer.java | 36 +
.../llap/io/decode/EncodedDataConsumer.java | 194 +
.../llap/io/decode/OrcColumnVectorProducer.java | 79 +
.../llap/io/decode/OrcEncodedDataConsumer.java | 161 +
.../hive/llap/io/decode/ReadPipeline.java | 27 +
.../llap/io/encoded/OrcEncodedDataReader.java | 971 +
.../llap/io/metadata/CompressionBuffer.java | 119 +
.../hive/llap/io/metadata/OrcFileMetadata.java | 231 +
.../hive/llap/io/metadata/OrcMetadataCache.java | 91 +
.../llap/io/metadata/OrcStripeMetadata.java | 163 +
.../hive/llap/metrics/LlapDaemonCacheInfo.java | 56 +
.../llap/metrics/LlapDaemonCacheMetrics.java | 154 +
.../metrics/LlapDaemonCustomMetricsInfo.java | 43 +
.../llap/metrics/LlapDaemonExecutorInfo.java | 56 +
.../llap/metrics/LlapDaemonExecutorMetrics.java | 196 +
.../hive/llap/metrics/LlapDaemonQueueInfo.java | 50 +
.../llap/metrics/LlapDaemonQueueMetrics.java | 116 +
.../hive/llap/metrics/LlapMetricsSystem.java | 57 +
.../hadoop/hive/llap/metrics/MetricsUtils.java | 44 +
.../hadoop/hive/llap/old/BufferInProgress.java | 82 +
.../apache/hadoop/hive/llap/old/BufferPool.java | 225 +
.../hadoop/hive/llap/old/CachePolicy.java | 34 +
.../apache/hadoop/hive/llap/old/ChunkPool.java | 237 +
.../protocol/LlapTaskUmbilicalProtocol.java | 39 +
.../AttemptRegistrationListener.java | 24 +
.../hive/llap/shufflehandler/DirWatcher.java | 414 +
.../shufflehandler/FadvisedChunkedFile.java | 78 +
.../llap/shufflehandler/FadvisedFileRegion.java | 160 +
.../hive/llap/shufflehandler/IndexCache.java | 199 +
.../llap/shufflehandler/ShuffleHandler.java | 1047 +
.../hadoop/hive/llap/tezplugins/Converters.java | 266 +
.../llap/tezplugins/LlapContainerLauncher.java | 43 +
.../llap/tezplugins/LlapTaskCommunicator.java | 617 +
.../hive/llap/tezplugins/TaskCommunicator.java | 479 +
.../tezplugins/helpers/SourceStateTracker.java | 291 +
.../apache/tez/dag/app/rm/ContainerFactory.java | 51 +
.../dag/app/rm/LlapTaskSchedulerService.java | 1363 +
.../main/resources/llap-daemon-log4j.properties | 78 +
llap-server/src/main/resources/llap.py | 75 +
llap-server/src/main/resources/package.py | 153 +
llap-server/src/main/resources/params.py | 39 +
llap-server/src/main/resources/templates.py | 123 +
.../src/main/resources/webapps/llap/.keep | 0
.../src/protobuf/LlapDaemonProtocol.proto | 125 +
.../hive/llap/cache/TestBuddyAllocator.java | 287 +
.../TestIncrementalObjectSizeEstimator.java | 247 +
.../hive/llap/cache/TestLowLevelCacheImpl.java | 520 +
.../llap/cache/TestLowLevelLrfuCachePolicy.java | 322 +
.../hive/llap/cache/TestOrcMetadataCache.java | 112 +
.../hive/llap/daemon/MiniLlapCluster.java | 192 +
.../daemon/impl/TaskExecutorTestHelpers.java | 243 +
.../impl/TestLlapDaemonProtocolServerImpl.java | 61 +
.../daemon/impl/TestTaskExecutorService.java | 290 +
.../TestFirstInFirstOutComparator.java | 321 +
.../TestShortestJobFirstComparator.java | 199 +
.../llap/tezplugins/TestTaskCommunicator.java | 143 +
.../app/rm/TestLlapTaskSchedulerService.java | 454 +
.../test/resources/llap-daemon-log4j.properties | 94 +
.../src/test/resources/llap-daemon-site.xml | 73 +
llap-server/src/test/resources/log4j.properties | 19 +
.../src/test/resources/webapps/llap/.keep | 0
metastore/if/hive_metastore.thrift | 60 +
metastore/pom.xml | 110 +-
.../upgrade/derby/021-HIVE-11970.derby.sql | 6 +
.../upgrade/derby/hive-schema-1.3.0.derby.sql | 12 +-
.../upgrade/derby/hive-schema-2.0.0.derby.sql | 12 +-
.../derby/upgrade-1.2.0-to-1.3.0.derby.sql | 1 +
.../derby/upgrade-1.2.0-to-2.0.0.derby.sql | 3 +-
.../upgrade/mssql/007-HIVE-11970.mssql.sql | 6 +
.../upgrade/mssql/hive-schema-1.3.0.mssql.sql | 12 +-
.../upgrade/mssql/hive-schema-2.0.0.mssql.sql | 12 +-
.../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql | 1 +
.../mssql/upgrade-1.2.0-to-2.0.0.mssql.sql | 7 +-
.../upgrade/mysql/022-HIVE-11970.mysql.sql | 6 +
.../upgrade/mysql/hive-schema-1.3.0.mysql.sql | 12 +-
.../upgrade/mysql/hive-schema-2.0.0.mysql.sql | 12 +-
.../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql | 1 +
.../mysql/upgrade-1.2.0-to-2.0.0.mysql.sql | 2 +
.../upgrade/oracle/022-HIVE-11970.oracle.sql | 23 +
.../oracle/hive-schema-0.13.0.oracle.sql | 10 +-
.../oracle/hive-schema-0.14.0.oracle.sql | 10 +-
.../upgrade/oracle/hive-schema-1.3.0.oracle.sql | 12 +-
.../upgrade/oracle/hive-schema-2.0.0.oracle.sql | 12 +-
.../oracle/hive-txn-schema-0.13.0.oracle.sql | 10 +-
.../oracle/hive-txn-schema-0.14.0.oracle.sql | 10 +-
.../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql | 2 +
.../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql | 2 +
.../postgres/021-HIVE-11970.postgres.sql | 6 +
.../postgres/hive-schema-1.3.0.postgres.sql | 12 +-
.../postgres/hive-schema-2.0.0.postgres.sql | 12 +-
.../upgrade-1.2.0-to-1.3.0.postgres.sql | 1 +
.../upgrade-1.2.0-to-2.0.0.postgres.sql | 1 +
.../apache/hadoop/hive/metastore/Metastore.java | 1331 +
.../metastore/hbase/HbaseMetastoreProto.java | 34901 +++++++++++++++++
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 32381 ++++++++++-----
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 2989 +-
.../ThriftHiveMetastore_server.skeleton.cpp | 25 +
.../thrift/gen-cpp/hive_metastore_constants.cpp | 2 +-
.../thrift/gen-cpp/hive_metastore_constants.h | 2 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 3516 +-
.../gen/thrift/gen-cpp/hive_metastore_types.h | 1666 +-
.../hive/metastore/api/AbortTxnRequest.java | 6 +-
.../metastore/api/AddDynamicPartitions.java | 6 +-
.../metastore/api/AddPartitionsRequest.java | 8 +-
.../hive/metastore/api/AddPartitionsResult.java | 4 +-
.../hadoop/hive/metastore/api/AggrStats.java | 6 +-
.../metastore/api/AlreadyExistsException.java | 4 +-
.../metastore/api/BinaryColumnStatsData.java | 10 +-
.../metastore/api/BooleanColumnStatsData.java | 10 +-
.../hive/metastore/api/CheckLockRequest.java | 6 +-
.../metastore/api/ClearFileMetadataRequest.java | 438 +
.../metastore/api/ClearFileMetadataResult.java | 283 +
.../hive/metastore/api/ColumnStatistics.java | 4 +-
.../metastore/api/ColumnStatisticsData.java | 2 +-
.../metastore/api/ColumnStatisticsDesc.java | 8 +-
.../hive/metastore/api/ColumnStatisticsObj.java | 4 +-
.../hive/metastore/api/CommitTxnRequest.java | 6 +-
.../hive/metastore/api/CompactionRequest.java | 4 +-
.../hive/metastore/api/CompactionType.java | 2 +-
.../api/ConfigValSecurityException.java | 4 +-
.../api/CurrentNotificationEventId.java | 6 +-
.../hadoop/hive/metastore/api/Database.java | 4 +-
.../apache/hadoop/hive/metastore/api/Date.java | 6 +-
.../hive/metastore/api/DateColumnStatsData.java | 8 +-
.../hadoop/hive/metastore/api/Decimal.java | 6 +-
.../metastore/api/DecimalColumnStatsData.java | 8 +-
.../metastore/api/DoubleColumnStatsData.java | 12 +-
.../hive/metastore/api/DropPartitionsExpr.java | 6 +-
.../metastore/api/DropPartitionsRequest.java | 12 +-
.../metastore/api/DropPartitionsResult.java | 4 +-
.../hive/metastore/api/EnvironmentContext.java | 4 +-
.../hive/metastore/api/EventRequestType.java | 2 +-
.../hadoop/hive/metastore/api/FieldSchema.java | 4 +-
.../metastore/api/FileMetadataExprType.java | 42 +
.../hive/metastore/api/FireEventRequest.java | 6 +-
.../metastore/api/FireEventRequestData.java | 2 +-
.../hive/metastore/api/FireEventResponse.java | 4 +-
.../hadoop/hive/metastore/api/Function.java | 6 +-
.../hadoop/hive/metastore/api/FunctionType.java | 2 +-
.../metastore/api/GetAllFunctionsResponse.java | 40 +-
.../api/GetFileMetadataByExprRequest.java | 773 +
.../api/GetFileMetadataByExprResult.java | 553 +
.../metastore/api/GetFileMetadataRequest.java | 438 +
.../metastore/api/GetFileMetadataResult.java | 540 +
.../metastore/api/GetOpenTxnsInfoResponse.java | 6 +-
.../hive/metastore/api/GetOpenTxnsResponse.java | 6 +-
.../api/GetPrincipalsInRoleRequest.java | 4 +-
.../api/GetPrincipalsInRoleResponse.java | 4 +-
.../api/GetRoleGrantsForPrincipalRequest.java | 4 +-
.../api/GetRoleGrantsForPrincipalResponse.java | 4 +-
.../api/GrantRevokePrivilegeRequest.java | 6 +-
.../api/GrantRevokePrivilegeResponse.java | 6 +-
.../metastore/api/GrantRevokeRoleRequest.java | 6 +-
.../metastore/api/GrantRevokeRoleResponse.java | 6 +-
.../hive/metastore/api/GrantRevokeType.java | 2 +-
.../hive/metastore/api/HeartbeatRequest.java | 8 +-
.../metastore/api/HeartbeatTxnRangeRequest.java | 8 +-
.../api/HeartbeatTxnRangeResponse.java | 4 +-
.../hive/metastore/api/HiveObjectPrivilege.java | 4 +-
.../hive/metastore/api/HiveObjectRef.java | 4 +-
.../hive/metastore/api/HiveObjectType.java | 2 +-
.../apache/hadoop/hive/metastore/api/Index.java | 10 +-
.../api/IndexAlreadyExistsException.java | 4 +-
.../metastore/api/InsertEventRequestData.java | 4 +-
.../metastore/api/InvalidInputException.java | 4 +-
.../metastore/api/InvalidObjectException.java | 4 +-
.../api/InvalidOperationException.java | 4 +-
.../api/InvalidPartitionException.java | 4 +-
.../hive/metastore/api/LockComponent.java | 4 +-
.../hadoop/hive/metastore/api/LockLevel.java | 2 +-
.../hadoop/hive/metastore/api/LockRequest.java | 6 +-
.../hadoop/hive/metastore/api/LockResponse.java | 6 +-
.../hadoop/hive/metastore/api/LockState.java | 2 +-
.../hadoop/hive/metastore/api/LockType.java | 2 +-
.../hive/metastore/api/LongColumnStatsData.java | 12 +-
.../hive/metastore/api/MetaException.java | 4 +-
.../hive/metastore/api/MetadataPpdResult.java | 517 +
.../hive/metastore/api/NoSuchLockException.java | 4 +-
.../metastore/api/NoSuchObjectException.java | 4 +-
.../hive/metastore/api/NoSuchTxnException.java | 4 +-
.../hive/metastore/api/NotificationEvent.java | 8 +-
.../metastore/api/NotificationEventRequest.java | 8 +-
.../api/NotificationEventResponse.java | 4 +-
.../hive/metastore/api/OpenTxnRequest.java | 6 +-
.../hive/metastore/api/OpenTxnsResponse.java | 4 +-
.../apache/hadoop/hive/metastore/api/Order.java | 6 +-
.../hadoop/hive/metastore/api/Partition.java | 8 +-
.../hive/metastore/api/PartitionEventType.java | 2 +-
.../api/PartitionListComposingSpec.java | 4 +-
.../hive/metastore/api/PartitionSpec.java | 4 +-
.../api/PartitionSpecWithSharedSD.java | 4 +-
.../hive/metastore/api/PartitionWithoutSD.java | 8 +-
.../metastore/api/PartitionsByExprRequest.java | 6 +-
.../metastore/api/PartitionsByExprResult.java | 6 +-
.../metastore/api/PartitionsStatsRequest.java | 4 +-
.../metastore/api/PartitionsStatsResult.java | 4 +-
.../metastore/api/PrincipalPrivilegeSet.java | 4 +-
.../hive/metastore/api/PrincipalType.java | 2 +-
.../hadoop/hive/metastore/api/PrivilegeBag.java | 4 +-
.../hive/metastore/api/PrivilegeGrantInfo.java | 8 +-
.../metastore/api/PutFileMetadataRequest.java | 588 +
.../metastore/api/PutFileMetadataResult.java | 283 +
.../hive/metastore/api/RequestPartsSpec.java | 2 +-
.../hadoop/hive/metastore/api/ResourceType.java | 2 +-
.../hadoop/hive/metastore/api/ResourceUri.java | 4 +-
.../apache/hadoop/hive/metastore/api/Role.java | 6 +-
.../hive/metastore/api/RolePrincipalGrant.java | 8 +-
.../hadoop/hive/metastore/api/Schema.java | 4 +-
.../hadoop/hive/metastore/api/SerDeInfo.java | 4 +-
.../api/SetPartitionsStatsRequest.java | 4 +-
.../hive/metastore/api/ShowCompactRequest.java | 4 +-
.../hive/metastore/api/ShowCompactResponse.java | 4 +-
.../api/ShowCompactResponseElement.java | 6 +-
.../hive/metastore/api/ShowLocksRequest.java | 4 +-
.../hive/metastore/api/ShowLocksResponse.java | 4 +-
.../metastore/api/ShowLocksResponseElement.java | 12 +-
.../hadoop/hive/metastore/api/SkewedInfo.java | 4 +-
.../hive/metastore/api/StorageDescriptor.java | 10 +-
.../metastore/api/StringColumnStatsData.java | 12 +-
.../apache/hadoop/hive/metastore/api/Table.java | 12 +-
.../hive/metastore/api/TableStatsRequest.java | 4 +-
.../hive/metastore/api/TableStatsResult.java | 4 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 7884 +++-
.../hive/metastore/api/TxnAbortedException.java | 4 +-
.../hadoop/hive/metastore/api/TxnInfo.java | 6 +-
.../hive/metastore/api/TxnOpenException.java | 4 +-
.../hadoop/hive/metastore/api/TxnState.java | 2 +-
.../apache/hadoop/hive/metastore/api/Type.java | 4 +-
.../hive/metastore/api/UnknownDBException.java | 4 +-
.../api/UnknownPartitionException.java | 4 +-
.../metastore/api/UnknownTableException.java | 4 +-
.../hive/metastore/api/UnlockRequest.java | 6 +-
.../hadoop/hive/metastore/api/Version.java | 4 +-
.../metastore/api/hive_metastoreConstants.java | 2 +-
.../gen-php/metastore/ThriftHiveMetastore.php | 2812 +-
.../src/gen/thrift/gen-php/metastore/Types.php | 1037 +-
.../hive_metastore/ThriftHiveMetastore-remote | 51 +-
.../hive_metastore/ThriftHiveMetastore.py | 4109 +-
.../thrift/gen-py/hive_metastore/constants.py | 2 +-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 1304 +-
.../thrift/gen-rb/hive_metastore_constants.rb | 2 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 177 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 269 +-
.../hive/metastore/FileMetadataHandler.java | 30 +
.../hadoop/hive/metastore/HiveAlterHandler.java | 40 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 422 +-
.../hive/metastore/HiveMetaStoreClient.java | 216 +-
.../hadoop/hive/metastore/IMetaStoreClient.java | 33 +
.../hive/metastore/MetaStoreDirectSql.java | 39 +-
.../hive/metastore/MetaStoreSchemaInfo.java | 47 +
.../hadoop/hive/metastore/MetaStoreUtils.java | 50 +-
.../hadoop/hive/metastore/ObjectStore.java | 479 +-
.../hive/metastore/PartFilterExprUtil.java | 149 +
.../metastore/PartitionExpressionProxy.java | 20 +
.../apache/hadoop/hive/metastore/RawStore.java | 98 +-
.../hadoop/hive/metastore/RawStoreProxy.java | 5 +-
.../hive/metastore/RetryingHMSHandler.java | 33 +-
.../apache/hadoop/hive/metastore/Warehouse.java | 4 +-
.../filemeta/OrcFileMetadataHandler.java | 63 +
.../hbase/AggrStatsInvalidatorFilter.java | 121 +
.../hadoop/hive/metastore/hbase/Counter.java | 53 +
.../hive/metastore/hbase/HBaseConnection.java | 96 +
.../metastore/hbase/HBaseFilterPlanUtil.java | 612 +
.../hive/metastore/hbase/HBaseImport.java | 535 +
.../hive/metastore/hbase/HBaseReadWrite.java | 2121 +
.../hive/metastore/hbase/HBaseSchemaTool.java | 240 +
.../hadoop/hive/metastore/hbase/HBaseStore.java | 2430 ++
.../hadoop/hive/metastore/hbase/HBaseUtils.java | 1340 +
.../hive/metastore/hbase/ObjectCache.java | 81 +
.../hive/metastore/hbase/PartitionCache.java | 168 +
.../metastore/hbase/PartitionKeyComparator.java | 292 +
.../hbase/SharedStorageDescriptor.java | 251 +
.../hadoop/hive/metastore/hbase/StatsCache.java | 326 +
.../metastore/hbase/TephraHBaseConnection.java | 127 +
.../metastore/hbase/VanillaHBaseConnection.java | 137 +
.../stats/BinaryColumnStatsAggregator.java | 35 +
.../stats/BooleanColumnStatsAggregator.java | 35 +
.../hbase/stats/ColumnStatsAggregator.java | 26 +
.../stats/ColumnStatsAggregatorFactory.java | 94 +
.../stats/DecimalColumnStatsAggregator.java | 43 +
.../stats/DoubleColumnStatsAggregator.java | 36 +
.../hbase/stats/LongColumnStatsAggregator.java | 36 +
.../stats/StringColumnStatsAggregator.java | 36 +
.../hive/metastore/parser/ExpressionTree.java | 9 +-
.../metastore/txn/CompactionTxnHandler.java | 36 +-
.../hadoop/hive/metastore/txn/TxnDbUtil.java | 4 +-
.../hadoop/hive/metastore/txn/TxnHandler.java | 225 +-
.../metastore/hbase/hbase_metastore_proto.proto | 282 +
.../hadoop/hive/metastore/metastore.proto | 29 +
.../metastore/AlternateFailurePreListener.java | 2 +-
.../DummyRawStoreControlledCommit.java | 68 +-
.../DummyRawStoreForJdoConnection.java | 61 +-
.../MockPartitionExpressionForMetastore.java | 12 +
.../hadoop/hive/metastore/TestObjectStore.java | 55 +-
.../hadoop/hive/metastore/hbase/MockUtils.java | 211 +
.../hbase/TestHBaseAggregateStatsCache.java | 316 +
.../hbase/TestHBaseFilterPlanUtil.java | 483 +
.../hive/metastore/hbase/TestHBaseStore.java | 1307 +
.../metastore/hbase/TestHBaseStoreCached.java | 378 +
.../hbase/TestSharedStorageDescriptor.java | 153 +
packaging/pom.xml | 15 +
packaging/src/main/assembly/bin.xml | 25 +
pom.xml | 255 +-
ql/pom.xml | 163 +-
.../gen/thrift/gen-cpp/queryplan_constants.cpp | 2 +-
ql/src/gen/thrift/gen-cpp/queryplan_constants.h | 2 +-
ql/src/gen/thrift/gen-cpp/queryplan_types.cpp | 162 +-
ql/src/gen/thrift/gen-cpp/queryplan_types.h | 79 +-
.../hadoop/hive/ql/plan/api/Adjacency.java | 4 +-
.../hadoop/hive/ql/plan/api/AdjacencyType.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Graph.java | 4 +-
.../hadoop/hive/ql/plan/api/NodeType.java | 2 +-
.../hadoop/hive/ql/plan/api/Operator.java | 8 +-
.../hadoop/hive/ql/plan/api/OperatorType.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Query.java | 8 +-
.../hadoop/hive/ql/plan/api/QueryPlan.java | 8 +-
.../apache/hadoop/hive/ql/plan/api/Stage.java | 8 +-
.../hadoop/hive/ql/plan/api/StageType.java | 2 +-
.../apache/hadoop/hive/ql/plan/api/Task.java | 8 +-
.../hadoop/hive/ql/plan/api/TaskType.java | 2 +-
ql/src/gen/thrift/gen-php/Types.php | 4 +-
ql/src/gen/thrift/gen-py/queryplan/constants.py | 2 +-
ql/src/gen/thrift/gen-py/queryplan/ttypes.py | 80 +-
ql/src/gen/thrift/gen-rb/queryplan_constants.rb | 2 +-
ql/src/gen/thrift/gen-rb/queryplan_types.rb | 2 +-
.../ExpressionTemplates/IfExprColumnColumn.txt | 186 -
.../org/apache/hadoop/hive/llap/DebugUtils.java | 78 +
.../org/apache/hadoop/hive/llap/LogLevels.java | 53 +
.../java/org/apache/hadoop/hive/ql/Driver.java | 130 +-
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 6 +-
.../apache/hadoop/hive/ql/QueryProperties.java | 10 -
.../hive/ql/exec/AbstractFileMergeOperator.java | 5 +-
.../hive/ql/exec/AbstractMapJoinOperator.java | 6 +-
.../hive/ql/exec/AppMasterEventOperator.java | 5 +-
.../hadoop/hive/ql/exec/CollectOperator.java | 5 +-
.../apache/hadoop/hive/ql/exec/ColumnInfo.java | 2 +-
.../hadoop/hive/ql/exec/CommonJoinOperator.java | 5 +-
.../hive/ql/exec/CommonMergeJoinOperator.java | 6 +-
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 355 +-
.../hadoop/hive/ql/exec/DemuxOperator.java | 6 +-
.../hadoop/hive/ql/exec/DummyStoreOperator.java | 5 +-
.../hadoop/hive/ql/exec/FetchOperator.java | 7 +-
.../hadoop/hive/ql/exec/FileSinkOperator.java | 69 +-
.../hadoop/hive/ql/exec/FilterOperator.java | 5 +-
.../hadoop/hive/ql/exec/ForwardOperator.java | 4 +-
.../hadoop/hive/ql/exec/FunctionRegistry.java | 100 +-
.../hive/ql/exec/GlobalWorkMapFactory.java | 105 +
.../hadoop/hive/ql/exec/GroupByOperator.java | 5 +-
.../hive/ql/exec/HashTableDummyOperator.java | 5 +-
.../hive/ql/exec/HashTableSinkOperator.java | 6 +-
.../hadoop/hive/ql/exec/JoinOperator.java | 5 +-
.../ql/exec/LateralViewForwardOperator.java | 4 +-
.../hive/ql/exec/LateralViewJoinOperator.java | 6 +-
.../hadoop/hive/ql/exec/LimitOperator.java | 5 +-
.../hadoop/hive/ql/exec/ListSinkOperator.java | 5 +-
.../hadoop/hive/ql/exec/MapJoinOperator.java | 31 +-
.../apache/hadoop/hive/ql/exec/MapOperator.java | 20 +-
.../apache/hadoop/hive/ql/exec/MoveTask.java | 13 +-
.../apache/hadoop/hive/ql/exec/MuxOperator.java | 5 +-
.../hadoop/hive/ql/exec/ObjectCacheFactory.java | 51 +-
.../hadoop/hive/ql/exec/ObjectCacheWrapper.java | 57 +
.../apache/hadoop/hive/ql/exec/Operator.java | 70 +-
.../hadoop/hive/ql/exec/OperatorUtils.java | 21 +
.../apache/hadoop/hive/ql/exec/PTFOperator.java | 5 +-
.../hadoop/hive/ql/exec/ReduceSinkOperator.java | 28 +-
.../hadoop/hive/ql/exec/SMBMapJoinOperator.java | 5 +-
.../hadoop/hive/ql/exec/ScriptOperator.java | 5 +-
.../hadoop/hive/ql/exec/SelectOperator.java | 7 +-
.../ql/exec/SparkHashTableSinkOperator.java | 8 +-
.../hadoop/hive/ql/exec/StatsNoJobTask.java | 25 +-
.../apache/hadoop/hive/ql/exec/StatsTask.java | 59 +-
.../hadoop/hive/ql/exec/TableScanOperator.java | 30 +-
.../org/apache/hadoop/hive/ql/exec/Task.java | 8 +
.../apache/hadoop/hive/ql/exec/TopNHash.java | 2 +-
.../hadoop/hive/ql/exec/UDTFOperator.java | 5 +-
.../hadoop/hive/ql/exec/UnionOperator.java | 5 +-
.../apache/hadoop/hive/ql/exec/Utilities.java | 135 +-
.../hadoop/hive/ql/exec/mr/ExecDriver.java | 16 +-
.../hadoop/hive/ql/exec/mr/ExecMapper.java | 2 +-
.../hadoop/hive/ql/exec/mr/ExecReducer.java | 3 +-
.../persistence/MapJoinBytesTableContainer.java | 4 +-
.../ql/exec/persistence/PTFRowContainer.java | 14 +-
.../hive/ql/exec/persistence/RowContainer.java | 12 +-
.../ql/exec/spark/RemoteHiveSparkClient.java | 22 +
.../ql/exec/spark/SparkMapRecordHandler.java | 2 +-
.../hadoop/hive/ql/exec/spark/SparkPlan.java | 3 +-
.../hive/ql/exec/spark/SparkPlanGenerator.java | 31 +-
.../hive/ql/exec/spark/SparkRecordHandler.java | 3 +-
.../ql/exec/spark/SparkReduceRecordHandler.java | 2 +-
.../hadoop/hive/ql/exec/spark/SparkTask.java | 2 +-
.../ql/exec/spark/status/SparkJobMonitor.java | 2 +-
.../ql/exec/tez/ColumnarSplitSizeEstimator.java | 59 +
.../hive/ql/exec/tez/CustomPartitionVertex.java | 6 +-
.../hadoop/hive/ql/exec/tez/DagUtils.java | 35 +-
.../hive/ql/exec/tez/HashTableLoader.java | 25 +-
.../hive/ql/exec/tez/HiveSplitGenerator.java | 62 +-
.../hadoop/hive/ql/exec/tez/InPlaceUpdates.java | 6 +-
.../hive/ql/exec/tez/LlapObjectCache.java | 141 +
.../hive/ql/exec/tez/MapRecordProcessor.java | 95 +-
.../hive/ql/exec/tez/MapRecordSource.java | 18 +
.../ql/exec/tez/MergeFileRecordProcessor.java | 15 +-
.../hive/ql/exec/tez/RecordProcessor.java | 28 +-
.../hive/ql/exec/tez/ReduceRecordProcessor.java | 46 +-
.../hive/ql/exec/tez/ReduceRecordSource.java | 3 +-
.../hadoop/hive/ql/exec/tez/SplitGrouper.java | 32 +-
.../hadoop/hive/ql/exec/tez/TezJobMonitor.java | 67 +-
.../hadoop/hive/ql/exec/tez/TezProcessor.java | 43 +-
.../hive/ql/exec/tez/TezSessionPoolManager.java | 40 +-
.../hive/ql/exec/tez/TezSessionState.java | 271 +-
.../apache/hadoop/hive/ql/exec/tez/TezTask.java | 71 +-
.../ql/exec/tez/tools/KeyValuesInputMerger.java | 1 -
.../vector/VectorAppMasterEventOperator.java | 8 +-
.../ql/exec/vector/VectorFileSinkOperator.java | 9 +-
.../ql/exec/vector/VectorFilterOperator.java | 6 +-
.../ql/exec/vector/VectorGroupByOperator.java | 11 +-
.../exec/vector/VectorMapJoinBaseOperator.java | 7 +-
.../ql/exec/vector/VectorMapJoinOperator.java | 6 +-
.../VectorMapJoinOuterFilteredOperator.java | 6 +-
.../exec/vector/VectorReduceSinkOperator.java | 7 +-
.../exec/vector/VectorSMBMapJoinOperator.java | 20 +-
.../ql/exec/vector/VectorSelectOperator.java | 7 +-
.../VectorSparkHashTableSinkOperator.java | 7 +-
...VectorSparkPartitionPruningSinkOperator.java | 7 +-
.../ql/exec/vector/VectorizationContext.java | 296 +-
.../ql/exec/vector/VectorizedBatchUtil.java | 183 +-
.../ql/exec/vector/VectorizedRowBatchCtx.java | 55 +-
.../expressions/FilterStringColumnInList.java | 13 +-
.../expressions/FilterStructColumnInList.java | 178 +
.../exec/vector/expressions/IStructInExpr.java | 36 +
.../IfExprDoubleColumnDoubleColumn.java | 167 +
.../expressions/IfExprLongColumnLongColumn.java | 166 +
.../expressions/LongColEqualLongColumn.java | 169 +
.../expressions/LongColEqualLongScalar.java | 151 +
.../LongColGreaterEqualLongColumn.java | 169 +
.../LongColGreaterEqualLongScalar.java | 151 +
.../expressions/LongColGreaterLongColumn.java | 169 +
.../expressions/LongColGreaterLongScalar.java | 151 +
.../expressions/LongColLessEqualLongColumn.java | 169 +
.../expressions/LongColLessEqualLongScalar.java | 151 +
.../expressions/LongColLessLongColumn.java | 169 +
.../expressions/LongColLessLongScalar.java | 151 +
.../expressions/LongColNotEqualLongColumn.java | 169 +
.../expressions/LongColNotEqualLongScalar.java | 151 +
.../expressions/LongScalarEqualLongColumn.java | 151 +
.../LongScalarGreaterEqualLongColumn.java | 151 +
.../LongScalarGreaterLongColumn.java | 151 +
.../LongScalarLessEqualLongColumn.java | 151 +
.../expressions/LongScalarLessLongColumn.java | 151 +
.../LongScalarNotEqualLongColumn.java | 151 +
.../ql/exec/vector/expressions/NullUtil.java | 27 +
.../vector/expressions/StringColumnInList.java | 4 +
.../vector/expressions/StructColumnInList.java | 174 +
.../mapjoin/VectorMapJoinCommonOperator.java | 9 +-
.../hadoop/hive/ql/hooks/LineageLogger.java | 93 +-
.../hive/ql/index/AggregateIndexHandler.java | 1 -
.../hive/ql/index/TableBasedIndexHandler.java | 7 -
.../ql/index/bitmap/BitmapIndexHandler.java | 1 -
.../ql/index/compact/CompactIndexHandler.java | 1 -
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 15 +-
.../apache/hadoop/hive/ql/io/ColumnarSplit.java | 33 +
.../hive/ql/io/CombineHiveInputFormat.java | 10 +-
.../hive/ql/io/DefaultHivePartitioner.java | 3 +-
.../org/apache/hadoop/hive/ql/io/HdfsUtils.java | 61 +
.../hadoop/hive/ql/io/HiveFileFormatUtils.java | 95 +-
.../hadoop/hive/ql/io/HiveInputFormat.java | 60 +-
.../apache/hadoop/hive/ql/io/IOContextMap.java | 39 +-
.../hadoop/hive/ql/io/InputFormatChecker.java | 5 +-
.../io/LlapWrappableInputFormatInterface.java | 22 +
.../hadoop/hive/ql/io/RCFileInputFormat.java | 3 +-
.../ql/io/SequenceFileInputFormatChecker.java | 3 +-
.../hive/ql/io/VectorizedRCFileInputFormat.java | 3 +-
.../ql/io/avro/AvroGenericRecordReader.java | 27 +-
.../hadoop/hive/ql/io/orc/BitFieldReader.java | 106 +-
.../hive/ql/io/orc/ColumnStatisticsImpl.java | 55 +-
.../hadoop/hive/ql/io/orc/DataReader.java | 58 +
.../hadoop/hive/ql/io/orc/DynamicByteArray.java | 2 +-
.../apache/hadoop/hive/ql/io/orc/FileDump.java | 197 +-
.../hadoop/hive/ql/io/orc/FileMetaInfo.java | 64 +
.../hadoop/hive/ql/io/orc/FileMetadata.java | 63 +
.../apache/hadoop/hive/ql/io/orc/InStream.java | 93 +-
.../hadoop/hive/ql/io/orc/IntegerReader.java | 5 +-
.../hadoop/hive/ql/io/orc/JsonFileDump.java | 216 +-
.../apache/hadoop/hive/ql/io/orc/Metadata.java | 45 -
.../hadoop/hive/ql/io/orc/MetadataReader.java | 105 +-
.../hive/ql/io/orc/MetadataReaderImpl.java | 123 +
.../apache/hadoop/hive/ql/io/orc/OrcFile.java | 75 +-
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 531 +-
.../hadoop/hive/ql/io/orc/OrcNewSplit.java | 6 +-
.../hadoop/hive/ql/io/orc/OrcOutputFormat.java | 145 +-
.../hive/ql/io/orc/OrcRawRecordMerger.java | 3 +
.../hadoop/hive/ql/io/orc/OrcRecordUpdater.java | 34 +-
.../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 18 +-
.../apache/hadoop/hive/ql/io/orc/OrcUtils.java | 177 +-
.../apache/hadoop/hive/ql/io/orc/OutStream.java | 4 +-
.../apache/hadoop/hive/ql/io/orc/Reader.java | 48 +-
.../hadoop/hive/ql/io/orc/ReaderImpl.java | 288 +-
.../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 228 +-
.../hive/ql/io/orc/RecordReaderUtils.java | 101 +-
.../hive/ql/io/orc/RunLengthByteReader.java | 28 +-
.../hive/ql/io/orc/RunLengthIntegerReader.java | 28 +-
.../ql/io/orc/RunLengthIntegerReaderV2.java | 76 +-
.../hive/ql/io/orc/SerializationUtils.java | 6 +-
.../ql/io/orc/SettableUncompressedStream.java | 44 +
.../hadoop/hive/ql/io/orc/StreamName.java | 2 +-
.../hive/ql/io/orc/TreeReaderFactory.java | 104 +-
.../hadoop/hive/ql/io/orc/TypeDescription.java | 466 +
.../ql/io/orc/VectorizedOrcInputFormat.java | 2 +-
.../apache/hadoop/hive/ql/io/orc/Writer.java | 9 +
.../hadoop/hive/ql/io/orc/WriterImpl.java | 549 +-
.../hive/ql/io/orc/encoded/CacheChunk.java | 69 +
.../hadoop/hive/ql/io/orc/encoded/Consumer.java | 30 +
.../hive/ql/io/orc/encoded/EncodedOrcFile.java | 30 +
.../hive/ql/io/orc/encoded/EncodedReader.java | 59 +
.../ql/io/orc/encoded/EncodedReaderImpl.java | 1326 +
.../orc/encoded/EncodedTreeReaderFactory.java | 1924 +
.../hive/ql/io/orc/encoded/OrcBatchKey.java | 60 +
.../hive/ql/io/orc/encoded/OrcCacheKey.java | 58 +
.../hadoop/hive/ql/io/orc/encoded/Reader.java | 72 +
.../hive/ql/io/orc/encoded/ReaderImpl.java | 42 +
.../hive/ql/io/orc/encoded/StreamUtils.java | 71 +
.../serde/ArrayWritableObjectInspector.java | 7 +
.../ql/io/rcfile/stats/PartialScanMapper.java | 12 +-
.../ql/io/rcfile/stats/PartialScanTask.java | 11 +-
.../ql/io/rcfile/stats/PartialScanWork.java | 14 +
.../hive/ql/io/sarg/ConvertAstToSearchArg.java | 11 +-
.../apache/hadoop/hive/ql/lib/RuleRegExp.java | 61 +-
.../hadoop/hive/ql/lockmgr/DbLockManager.java | 21 +
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 34 +-
.../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 3 +-
.../hadoop/hive/ql/lockmgr/HiveLockObject.java | 6 +-
.../zookeeper/ZooKeeperHiveLockManager.java | 41 +
.../apache/hadoop/hive/ql/log/PerfLogger.java | 195 -
.../apache/hadoop/hive/ql/metadata/Hive.java | 262 +-
.../hadoop/hive/ql/metadata/Partition.java | 29 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 2 +-
.../apache/hadoop/hive/ql/metadata/Table.java | 5 +-
.../formatting/MetaDataPrettyFormatUtils.java | 24 +-
.../hive/ql/optimizer/ColumnPrunerProcCtx.java | 2 +-
.../ql/optimizer/ColumnPrunerProcFactory.java | 9 +-
.../optimizer/ConstantPropagateProcFactory.java | 11 +-
.../hive/ql/optimizer/ConvertJoinMapJoin.java | 103 +-
.../hive/ql/optimizer/GenMRTableScan1.java | 3 +
.../hive/ql/optimizer/GenMapRedUtils.java | 56 +-
.../hive/ql/optimizer/SimpleFetchOptimizer.java | 11 +-
.../optimizer/SortedDynPartitionOptimizer.java | 7 +-
.../hive/ql/optimizer/StatsOptimizer.java | 84 +-
.../ql/optimizer/calcite/HiveCalciteUtil.java | 12 +-
.../ql/optimizer/calcite/HiveConfigContext.java | 37 -
.../calcite/HiveHepPlannerContext.java | 37 +
.../calcite/HiveVolcanoPlannerContext.java | 37 +
.../ql/optimizer/calcite/RelOptHiveTable.java | 15 +
.../calcite/cost/HiveVolcanoPlanner.java | 6 +-
.../functions/HiveSqlCountAggFunction.java | 72 +
.../functions/HiveSqlMinMaxAggFunction.java | 49 +
.../functions/HiveSqlSumAggFunction.java | 125 +
.../calcite/reloperators/HiveBetween.java | 75 +
.../optimizer/calcite/reloperators/HiveIn.java | 41 +
.../rules/HiveAggregateJoinTransposeRule.java | 372 +
.../calcite/rules/HivePreFilteringRule.java | 135 +-
.../calcite/rules/HiveRulesRegistry.java | 44 +
.../calcite/translator/HiveGBOpConvUtil.java | 43 +-
.../translator/PlanModifierForASTConv.java | 11 +
.../translator/PlanModifierForReturnPath.java | 26 +
.../translator/SqlFunctionConverter.java | 56 +-
.../correlation/CorrelationUtilities.java | 33 -
.../correlation/ReduceSinkDeDuplication.java | 15 +-
.../RewriteQueryUsingAggregateIndexCtx.java | 2 +-
.../hive/ql/optimizer/lineage/LineageCtx.java | 9 +-
.../hive/ql/optimizer/physical/LlapDecider.java | 447 +
.../hive/ql/optimizer/physical/Vectorizer.java | 91 +-
.../ql/optimizer/physical/Vectorizer.java.orig | 1744 +
.../ppr/PartitionExpressionForMetastore.java | 40 +
.../hive/ql/optimizer/ppr/PartitionPruner.java | 33 +-
.../hadoop/hive/ql/parse/CalcitePlanner.java | 44 +-
.../ql/parse/ColumnStatsSemanticAnalyzer.java | 16 +-
.../hive/ql/parse/DDLSemanticAnalyzer.java | 17 +
.../hadoop/hive/ql/parse/FromClauseParser.g | 30 +-
.../hadoop/hive/ql/parse/GenTezUtils.java | 2 +-
.../org/apache/hadoop/hive/ql/parse/HiveLexer.g | 1 -
.../apache/hadoop/hive/ql/parse/HiveParser.g | 17 +-
.../hive/ql/parse/LoadSemanticAnalyzer.java | 12 -
.../hive/ql/parse/ProcessAnalyzeTable.java | 4 +-
.../hadoop/hive/ql/parse/QBParseInfo.java | 9 -
.../apache/hadoop/hive/ql/parse/QBSubQuery.java | 7 -
.../hadoop/hive/ql/parse/SelectClauseParser.g | 1 -
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 149 +-
.../hive/ql/parse/SemanticAnalyzerFactory.java | 2 +
.../hadoop/hive/ql/parse/SplitSample.java | 4 +
.../hadoop/hive/ql/parse/SubQueryUtils.java | 11 -
.../hadoop/hive/ql/parse/TaskCompiler.java | 1 +
.../hadoop/hive/ql/parse/TezCompiler.java | 7 +
.../hive/ql/parse/VariableSubstitution.java | 60 -
.../hive/ql/parse/spark/SparkCompiler.java | 3 +-
.../SparkPartitionPruningSinkOperator.java | 5 +-
.../parse/spark/SparkProcessAnalyzeTable.java | 2 +
.../apache/hadoop/hive/ql/plan/BaseWork.java | 19 +
.../org/apache/hadoop/hive/ql/plan/DDLWork.java | 21 +
.../hive/ql/plan/DynamicPartitionCtx.java | 27 -
.../apache/hadoop/hive/ql/plan/ExplainWork.java | 4 +-
.../hive/ql/plan/ExprNodeGenericFuncDesc.java | 10 +-
.../hadoop/hive/ql/plan/FileSinkDesc.java | 16 +-
.../hadoop/hive/ql/plan/HiveOperation.java | 1 +
.../hadoop/hive/ql/plan/LoadTableDesc.java | 14 -
.../org/apache/hadoop/hive/ql/plan/MapWork.java | 50 +-
.../hadoop/hive/ql/plan/MergeJoinWork.java | 34 +-
.../apache/hadoop/hive/ql/plan/PlanUtils.java | 3 +-
.../apache/hadoop/hive/ql/plan/ReduceWork.java | 20 +-
.../hive/ql/plan/ShowCreateDatabaseDesc.java | 94 +
.../apache/hadoop/hive/ql/plan/StatsWork.java | 15 +-
.../hadoop/hive/ql/plan/TableScanDesc.java | 12 +-
.../org/apache/hadoop/hive/ql/plan/TezWork.java | 17 +-
.../hadoop/hive/ql/plan/VectorGroupByDesc.java | 10 +
.../hadoop/hive/ql/ppd/ExprWalkerInfo.java | 23 +-
.../hadoop/hive/ql/ppd/OpProcFactory.java | 10 +-
.../ql/processors/AddResourceProcessor.java | 11 +-
.../ql/processors/CommandProcessorFactory.java | 3 +
.../hive/ql/processors/CompileProcessor.java | 11 +-
.../ql/processors/DeleteResourceProcessor.java | 11 +-
.../hadoop/hive/ql/processors/DfsProcessor.java | 11 +-
.../hadoop/hive/ql/processors/SetProcessor.java | 62 +-
.../AuthorizationPreEventListener.java | 2 +-
.../StorageBasedAuthorizationProvider.java | 10 +-
.../authorization/plugin/HiveOperationType.java | 1 +
.../plugin/sqlstd/Operation2Privilege.java | 2 +
.../hadoop/hive/ql/session/SessionState.java | 162 +-
.../hive/ql/stats/CounterStatsAggregator.java | 15 +-
.../ql/stats/CounterStatsAggregatorSpark.java | 13 +-
.../ql/stats/CounterStatsAggregatorTez.java | 17 +-
.../hive/ql/stats/CounterStatsPublisher.java | 9 +-
.../hadoop/hive/ql/stats/StatsAggregator.java | 23 +-
.../hive/ql/stats/StatsCollectionContext.java | 63 +
.../stats/StatsCollectionTaskIndependent.java | 25 -
.../hadoop/hive/ql/stats/StatsFactory.java | 3 +-
.../hadoop/hive/ql/stats/StatsPublisher.java | 8 +-
.../apache/hadoop/hive/ql/stats/StatsUtils.java | 185 +-
.../hive/ql/stats/fs/FSStatsAggregator.java | 31 +-
.../hive/ql/stats/fs/FSStatsPublisher.java | 35 +-
.../hive/ql/stats/jdbc/JDBCStatsAggregator.java | 264 -
.../hive/ql/stats/jdbc/JDBCStatsPublisher.java | 338 -
.../ql/stats/jdbc/JDBCStatsSetupConstants.java | 39 -
.../hive/ql/stats/jdbc/JDBCStatsUtils.java | 212 -
.../hive/ql/txn/AcidHouseKeeperService.java | 5 +-
.../hadoop/hive/ql/txn/compactor/Cleaner.java | 3 +-
.../hive/ql/txn/compactor/CompactorMR.java | 104 +-
.../hive/ql/txn/compactor/CompactorThread.java | 9 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 5 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 6 +-
.../org/apache/hadoop/hive/ql/udf/UDFJson.java | 2 +
.../hive/ql/udf/generic/GenericUDAFSum.java | 2 +-
.../udf/generic/GenericUDAFSumEmptyIsZero.java | 63 +
.../udf/generic/GenericUDFBaseNwayCompare.java | 115 +
.../hive/ql/udf/generic/GenericUDFGreatest.java | 75 +-
.../hive/ql/udf/generic/GenericUDFHash.java | 11 +-
.../hive/ql/udf/generic/GenericUDFIf.java | 4 +-
.../hive/ql/udf/generic/GenericUDFLeast.java | 9 +-
.../hive/ql/udf/generic/GenericUDFOPEqual.java | 3 +
.../generic/GenericUDFOPEqualOrGreaterThan.java | 3 +
.../generic/GenericUDFOPEqualOrLessThan.java | 3 +
.../ql/udf/generic/GenericUDFOPGreaterThan.java | 3 +
.../ql/udf/generic/GenericUDFOPLessThan.java | 3 +
.../ql/udf/generic/GenericUDFOPNotEqual.java | 3 +
.../hive/ql/udf/generic/GenericUDFUtils.java | 3 +-
.../hadoop/hive/ql/util/JavaDataModel.java | 75 +-
.../main/resources/hive-exec-log4j.properties | 77 +
ql/src/main/resources/hive-exec-log4j2.xml | 4 +-
.../hadoop/hive/ql/io/orc/orc_proto.proto | 1 +
.../hive/metastore/TestMetastoreExpr.java | 2 +-
.../hadoop/hive/ql/exec/TestExecDriver.java | 2 +-
.../hive/ql/exec/TestFileSinkOperator.java | 405 +-
.../ql/exec/TestStatsPublisherEnhanced.java | 377 -
.../persistence/TestBytesBytesMultiHashMap.java | 3 +
.../ql/exec/persistence/TestHashPartition.java | 29 +
.../exec/persistence/TestPTFRowContainer.java | 31 +-
.../hive/ql/exec/tez/TestTezSessionPool.java | 79 +-
.../hadoop/hive/ql/exec/tez/TestTezTask.java | 2 +
.../exec/vector/TestVectorizationContext.java | 102 +-
.../TestVectorConditionalExpressions.java | 3 +-
.../vector/util/FakeCaptureOutputOperator.java | 5 +-
.../util/FakeVectorDataSourceOperator.java | 4 +-
.../hadoop/hive/ql/io/TestIOContextMap.java | 76 +-
.../ql/io/avro/TestAvroGenericRecordReader.java | 59 +
.../hive/ql/io/orc/TestBitFieldReader.java | 11 +-
.../hadoop/hive/ql/io/orc/TestBitPack.java | 2 +-
.../hive/ql/io/orc/TestColumnStatistics.java | 45 +-
.../hadoop/hive/ql/io/orc/TestFileDump.java | 50 -
.../hadoop/hive/ql/io/orc/TestInStream.java | 20 +-
.../hive/ql/io/orc/TestInputOutputFormat.java | 45 +-
.../ql/io/orc/TestIntegerCompressionReader.java | 5 +-
.../hadoop/hive/ql/io/orc/TestOrcFile.java | 58 +-
.../hive/ql/io/orc/TestOrcRawRecordMerger.java | 2 +-
.../hadoop/hive/ql/io/orc/TestOrcWideTable.java | 246 +-
.../hive/ql/io/orc/TestRecordReaderImpl.java | 2 +-
.../hive/ql/io/orc/TestRunLengthByteReader.java | 10 +-
.../ql/io/orc/TestRunLengthIntegerReader.java | 8 +-
.../hive/ql/io/orc/TestTypeDescription.java | 67 +
.../hive/ql/io/sarg/TestSearchArgumentImpl.java | 87 +-
.../hive/ql/lockmgr/TestDbTxnManager.java | 8 +-
.../zookeeper/TestZookeeperLockManager.java | 50 +
.../hadoop/hive/ql/metadata/StringAppender.java | 2 +-
.../hadoop/hive/ql/metadata/TestHive.java | 2 +-
.../calcite/TestCBORuleFiredOnlyOnce.java | 168 +
.../TestNegativePartitionPrunerCompactExpr.java | 27 +
.../TestPositivePartitionPrunerCompactExpr.java | 115 +
.../hadoop/hive/ql/parse/TestSplitSample.java | 60 +
.../hive/ql/processors/TestSetProcessor.java | 54 +
.../hive/ql/txn/compactor/CompactorTest.java | 6 +-
.../hive/ql/txn/compactor/TestWorker.java | 120 +-
.../ql/udf/generic/TestGenericUDFGreatest.java | 153 +-
.../ql/udf/generic/TestGenericUDFLeast.java | 149 +-
ql/src/test/queries/clientcompare/llap_0.q | 12 +
ql/src/test/queries/clientcompare/llap_0_00.qv | 1 +
ql/src/test/queries/clientcompare/llap_0_01.qv | 1 +
.../test/queries/clientnegative/ctasnullcol.q | 2 +
ql/src/test/queries/clientnegative/ddltime.q | 6 -
.../special_character_in_tabnames_1.q | 13 +
.../clientnegative/udf_greatest_error_2.q | 2 +-
.../clientnegative/udf_greatest_error_3.q | 1 -
.../clientnegative/udf_greatest_error_4.q | 1 -
.../acid_vectorization_partition.q | 2 +-
.../test/queries/clientpositive/add_jar_pfile.q | 8 +
.../clientpositive/alter_table_update_status.q | 4 +-
.../queries/clientpositive/analyze_tbl_part.q | 4 +-
.../clientpositive/auto_sortmerge_join_6.q | 1 +
.../queries/clientpositive/avrocountemptytbl.q | 8 +
.../cbo_rp_gby2_map_multi_distinct.q | 38 +
.../queries/clientpositive/cbo_rp_lineage2.q | 117 +
.../cbo_rp_udaf_percentile_approx_23.q | 97 +
.../test/queries/clientpositive/constprog_dpp.q | 3 +-
ql/src/test/queries/clientpositive/cross_join.q | 8 +
ql/src/test/queries/clientpositive/ddltime.q | 45 -
.../test/queries/clientpositive/decimal_1_1.q | 9 +
.../clientpositive/drop_table_with_index.q | 35 +
.../dynpart_sort_opt_vectorization.q | 4 +-
.../clientpositive/dynpart_sort_optimization.q | 4 +-
.../clientpositive/dynpart_sort_optimization2.q | 2 -
.../test/queries/clientpositive/escape_crlf.q | 19 +
.../test/queries/clientpositive/explainuser_1.q | 2 +
.../clientpositive/filter_cond_pushdown.q | 5 +
.../clientpositive/groupby_join_pushdown.q | 55 +
.../identity_project_remove_skip.q | 1 +
.../infer_bucket_sort_multi_insert.q | 1 +
ql/src/test/queries/clientpositive/insert1.q | 2 +
.../test/queries/clientpositive/insert_into1.q | 18 +-
.../test/queries/clientpositive/insert_into2.q | 8 +
.../clientpositive/insertvalues_espchars.q | 5 +
ql/src/test/queries/clientpositive/join44.q | 12 +
.../queries/clientpositive/join_grp_diff_keys.q | 21 +
ql/src/test/queries/clientpositive/join_parse.q | 20 +
.../test/queries/clientpositive/json_serde1.q | 36 +
.../test/queries/clientpositive/lb_fs_stats.q | 2 -
ql/src/test/queries/clientpositive/lineage3.q | 26 +
.../queries/clientpositive/llap_partitioned.q | 66 +
.../queries/clientpositive/llap_uncompressed.q | 48 +
.../test/queries/clientpositive/llapdecider.q | 64 +
.../queries/clientpositive/load_non_hdfs_path.q | 6 +
.../test/queries/clientpositive/load_orc_part.q | 5 +
.../clientpositive/metadata_only_queries.q | 17 +-
.../metadata_only_queries_with_filters.q | 2 +-
ql/src/test/queries/clientpositive/mrr.q | 8 +-
.../test/queries/clientpositive/multi_insert.q | 2 +-
.../queries/clientpositive/multi_insert_gby2.q | 2 +-
.../queries/clientpositive/multi_insert_gby3.q | 2 +-
.../clientpositive/multi_insert_lateral_view.q | 1 +
.../queries/clientpositive/multi_insert_mixed.q | 2 +-
...multi_insert_move_tasks_share_dependencies.q | 2 +-
.../clientpositive/multi_insert_union_src.q | 2 +-
ql/src/test/queries/clientpositive/nullMap.q | 14 +
ql/src/test/queries/clientpositive/orc_llap.q | 148 +
.../parquet_mixed_partition_formats2.q | 31 +
.../clientpositive/parquet_ppd_boolean.q | 4 +-
.../queries/clientpositive/parquet_ppd_char.q | 12 +-
.../queries/clientpositive/parquet_ppd_date.q | 16 +-
.../clientpositive/parquet_ppd_decimal.q | 32 +-
.../clientpositive/parquet_ppd_timestamp.q | 16 +-
.../clientpositive/parquet_ppd_varchar.q | 12 +-
.../test/queries/clientpositive/scriptfile1.q | 3 +
.../test/queries/clientpositive/selectindate.q | 9 +
ql/src/test/queries/clientpositive/show_conf.q | 2 +-
.../clientpositive/show_create_database.q | 3 +
.../clientpositive/skewjoin_onesideskew.q | 22 +
.../special_character_in_tabnames_1.q | 1075 +
.../special_character_in_tabnames_2.q | 40 +
ql/src/test/queries/clientpositive/stats19.q | 4 +-
.../queries/clientpositive/stats_only_null.q | 1 -
ql/src/test/queries/clientpositive/statsfs.q | 2 -
.../queries/clientpositive/subquery_views.q | 22 +-
ql/src/test/queries/clientpositive/temp_table.q | 26 +
ql/src/test/queries/clientpositive/tez_fsstat.q | 2 -
ql/src/test/queries/clientpositive/topn.q | 13 +
.../test/queries/clientpositive/udf_greatest.q | 20 +
ql/src/test/queries/clientpositive/udf_least.q | 20 +
.../queries/clientpositive/udtf_nofetchtask.q | 10 +
ql/src/test/queries/clientpositive/union36.q | 10 +
.../queries/clientpositive/unionDistinct_1.q | 5 +
.../queries/clientpositive/union_fast_stats.q | 68 +
ql/src/test/queries/clientpositive/union_view.q | 2 +
.../queries/clientpositive/update_all_types.q | 2 +-
.../clientpositive/vector_auto_smb_mapjoin_14.q | 297 +
.../queries/clientpositive/vector_char_cast.q | 9 +
.../queries/clientpositive/vector_coalesce.q | 22 +
.../clientpositive/vector_groupby_reduce.q | 62 +-
.../clientpositive/vector_leftsemi_mapjoin.q | 4 +-
.../clientpositive/vector_multi_insert.q | 2 +
ql/src/test/queries/clientpositive/vector_nvl.q | 36 +
.../queries/clientpositive/vector_struct_in.q | 247 +
.../clientpositive/vectorization_limit.q | 4 +-
.../clientpositive/windowing_windowspec2.q | 16 +-
.../resources/orc-file-dump-bloomfilter.out | 114 +-
.../resources/orc-file-dump-bloomfilter2.out | 146 +-
.../orc-file-dump-dictionary-threshold.out | 182 +-
ql/src/test/resources/orc-file-dump.json | 189 +-
ql/src/test/resources/orc-file-dump.out | 160 +-
ql/src/test/resources/orc-file-has-null.out | 82 +-
.../results/clientnegative/ctasnullcol.q.out | 5 +
.../clientnegative/cte_with_in_subquery.q.out | 2 +-
.../test/results/clientnegative/ddltime.q.out | 9 -
.../results/clientnegative/insertsel_fail.q.out | 2 +-
.../special_character_in_tabnames_1.q.out | 10 +
.../subquery_exists_implicit_gby.q.out | 8 +-
.../subquery_nested_subquery.q.out | 4 +-
.../subquery_notexists_implicit_gby.q.out | 8 +-
.../subquery_windowing_corr.q.out | 7 +-
.../clientnegative/udf_greatest_error_2.q.out | 2 +-
.../clientnegative/udf_greatest_error_3.q.out | 1 -
.../clientnegative/udf_greatest_error_4.q.out | 1 -
.../acid_vectorization_partition.q.out | 14 +-
.../results/clientpositive/add_jar_pfile.q.out | 12 +
.../alter_partition_coltype.q.out | 3 +
.../clientpositive/annotate_stats_part.q.out | 6 +-
.../clientpositive/annotate_stats_table.q.out | 4 +-
.../auto_join_reordering_values.q.out | 7 +-
.../clientpositive/auto_sortmerge_join_1.q.out | 5 +
.../clientpositive/auto_sortmerge_join_11.q.out | 4 +
.../clientpositive/auto_sortmerge_join_12.q.out | 1 +
.../clientpositive/auto_sortmerge_join_2.q.out | 4 +
.../clientpositive/auto_sortmerge_join_3.q.out | 5 +
.../clientpositive/auto_sortmerge_join_4.q.out | 5 +
.../clientpositive/auto_sortmerge_join_5.q.out | 5 +
.../clientpositive/auto_sortmerge_join_7.q.out | 5 +
.../clientpositive/auto_sortmerge_join_8.q.out | 5 +
.../clientpositive/avrocountemptytbl.q.out | 58 +
.../clientpositive/binary_output_format.q.out | 4 +-
.../test/results/clientpositive/bucket4.q.out | 8 +-
.../test/results/clientpositive/bucket5.q.out | 10 +-
.../results/clientpositive/bucket_many.q.out | 8 +-
.../clientpositive/bucket_map_join_1.q.out | 5 +-
.../clientpositive/bucket_map_join_2.q.out | 5 +-
.../clientpositive/bucket_map_join_spark4.q.out | 2 +
.../clientpositive/bucketcontext_1.q.out | 2 +
.../clientpositive/bucketcontext_2.q.out | 2 +
.../clientpositive/bucketcontext_3.q.out | 2 +
.../clientpositive/bucketcontext_4.q.out | 2 +
.../clientpositive/bucketcontext_5.q.out | 2 +
.../clientpositive/bucketcontext_6.q.out | 2 +
.../clientpositive/bucketcontext_7.q.out | 2 +
.../clientpositive/bucketcontext_8.q.out | 2 +
.../results/clientpositive/bucketmapjoin1.q.out | 10 +-
.../clientpositive/bucketmapjoin10.q.out | 1 +
.../clientpositive/bucketmapjoin11.q.out | 2 +
.../clientpositive/bucketmapjoin12.q.out | 2 +
.../clientpositive/bucketmapjoin13.q.out | 4 +
.../results/clientpositive/bucketmapjoin2.q.out | 12 +-
.../results/clientpositive/bucketmapjoin3.q.out | 8 +-
.../results/clientpositive/bucketmapjoin4.q.out | 8 +-
.../results/clientpositive/bucketmapjoin5.q.out | 8 +-
.../results/clientpositive/bucketmapjoin7.q.out | 1 +
.../results/clientpositive/bucketmapjoin8.q.out | 2 +
.../results/clientpositive/bucketmapjoin9.q.out | 2 +
.../clientpositive/bucketmapjoin_negative.q.out | 4 +-
.../bucketmapjoin_negative2.q.out | 4 +-
.../bucketmapjoin_negative3.q.out | 9 +
.../clientpositive/cbo_rp_auto_join1.q.out | 151 +-
.../cbo_rp_cross_product_check_2.q.out | 18 +-
.../cbo_rp_gby2_map_multi_distinct.q.out | 236 +
.../clientpositive/cbo_rp_lineage2.q.out | 677 +
.../cbo_rp_outer_join_ppr.q.java1.7.out | 2 +
.../cbo_rp_udaf_percentile_approx_23.q.out | 628 +
.../clientpositive/columnstats_partlvl.q.out | 2 +
.../clientpositive/columnstats_tbllvl.q.out | 2 +
.../test/results/clientpositive/combine2.q.out | 1 +
.../constantPropagateForSubQuery.q.out | 1 +
.../results/clientpositive/cross_join.q.out | 196 +
ql/src/test/results/clientpositive/ctas.q.out | 2 +-
.../test/results/clientpositive/ddltime.q.out | 188 -
.../results/clientpositive/decimal_1_1.q.out | 104 +
.../clientpositive/describe_pretty.q.out | 366 +-
.../disable_merge_for_bucketing.q.out | 12 +-
.../display_colstats_tbllvl.q.out | 1 +
.../clientpositive/drop_table_with_index.q.out | 152 +
.../clientpositive/dynamic_rdd_cache.q.out | 28 +-
.../dynpart_sort_opt_vectorization.q.out | 28 +-
.../dynpart_sort_optimization.q.out | 12 +-
.../dynpart_sort_optimization2.q.out | 8 +-
.../encryption_insert_partition_dynamic.q.out | 24 +-
.../encryption_join_unencrypted_tbl.q.out | 27 +-
...on_join_with_different_encryption_keys.q.out | 37 +-
.../results/clientpositive/escape_crlf.q.out | 98 +
.../extrapolate_part_stats_full.q.out | 24 +-
.../extrapolate_part_stats_partial.q.out | 76 +-
.../extrapolate_part_stats_partial_ndv.q.out | 38 +-
.../clientpositive/filter_cond_pushdown.q.out | 80 +
.../clientpositive/filter_join_breaktask.q.out | 3 +-
.../clientpositive/fouter_join_ppr.q.out | 4 +
.../clientpositive/groupby_join_pushdown.q.out | 1522 +
.../clientpositive/groupby_sort_1_23.q.out | 45 +-
.../clientpositive/groupby_sort_skew_1_23.q.out | 59 +-
.../infer_bucket_sort_bucketed_table.q.out | 4 +-
.../infer_bucket_sort_dyn_part.q.out | 32 +-
.../infer_bucket_sort_map_operators.q.out | 38 +-
.../infer_bucket_sort_merge.q.out | 8 +-
.../infer_bucket_sort_num_buckets.q.out | 8 +-
.../infer_bucket_sort_reducers_power_two.q.out | 24 +-
.../test/results/clientpositive/input23.q.out | 1 +
.../results/clientpositive/input_part1.q.out | 4 +-
.../results/clientpositive/input_part2.q.out | 8 +-
.../results/clientpositive/input_part7.q.out | 1 +
.../results/clientpositive/insert_into1.q.out | 355 +
.../results/clientpositive/insert_into2.q.out | 69 +
.../clientpositive/insertvalues_espchars.q.out | 30 +
ql/src/test/results/clientpositive/join26.q.out | 4 +-
.../clientpositive/join32_lessSize.q.out | 10 +-
ql/src/test/results/clientpositive/join35.q.out | 4 +-
ql/src/test/results/clientpositive/join44.q.out | 88 +
.../join_cond_pushdown_unqual1.q.out | 12 +-
.../join_cond_pushdown_unqual3.q.out | 18 +-
.../join_cond_pushdown_unqual4.q.out | 2 +-
.../clientpositive/join_filters_overlap.q.out | 5 +
.../clientpositive/join_grp_diff_keys.q.out | 190 +
.../results/clientpositive/join_map_ppr.q.out | 8 +-
.../results/clientpositive/join_parse.q.out | 516 +
.../results/clientpositive/json_serde1.q.out | 113 +
.../clientpositive/lateral_view_noalias.q.out | 120 +-
.../test/results/clientpositive/lineage3.q.out | 68 +-
.../list_bucket_dml_10.q.java1.7.out | 4 +-
.../clientpositive/list_bucket_dml_14.q.out | 1 +
.../list_bucket_dml_4.q.java1.8.out | 76 +-
.../list_bucket_dml_6.q.java1.8.out | 8 +-
.../list_bucket_dml_9.q.java1.8.out | 76 +-
.../list_bucket_query_multiskew_3.q.out | 1 +
.../list_bucket_query_oneskew_2.q.out | 2 +
.../llap/acid_vectorization.q.out | 62 +
.../llap/acid_vectorization_partition.q.out | 60 +
.../llap/acid_vectorization_project.q.out | 73 +
.../clientpositive/llap/alter_merge_2_orc.q.out | 123 +
.../clientpositive/llap/alter_merge_orc.q.out | 263 +
.../llap/alter_merge_stats_orc.q.out | 382 +
.../clientpositive/llap/auto_join0.q.out | 172 +
.../clientpositive/llap/auto_join1.q.out | 125 +
.../clientpositive/llap/auto_join21.q.out | 615 +
.../clientpositive/llap/auto_join29.q.out | 3556 ++
.../clientpositive/llap/auto_join30.q.out | 1361 +
.../clientpositive/llap/auto_join_filters.q.out | 540 +
.../clientpositive/llap/auto_join_nulls.q.out | 217 +
.../llap/auto_sortmerge_join_1.q.out | 1034 +
.../llap/auto_sortmerge_join_10.q.out | 369 +
.../llap/auto_sortmerge_join_11.q.out | 1485 +
.../llap/auto_sortmerge_join_12.q.out | 645 +
.../llap/auto_sortmerge_join_13.q.out | 692 +
.../llap/auto_sortmerge_join_14.q.out | 224 +
.../llap/auto_sortmerge_join_15.q.out | 188 +
.../llap/auto_sortmerge_join_16.q.out | 256 +
.../llap/auto_sortmerge_join_2.q.out | 707 +
.../llap/auto_sortmerge_join_3.q.out | 1014 +
.../llap/auto_sortmerge_join_4.q.out | 1030 +
.../llap/auto_sortmerge_join_5.q.out | 780 +
.../llap/auto_sortmerge_join_6.q.out | 1200 +
.../llap/auto_sortmerge_join_7.q.out | 1200 +
.../llap/auto_sortmerge_join_8.q.out | 1202 +
.../llap/auto_sortmerge_join_9.q.out | 3521 ++
.../results/clientpositive/llap/bucket2.q.out | 473 +
.../results/clientpositive/llap/bucket3.q.out | 498 +
.../results/clientpositive/llap/bucket4.q.out | 472 +
.../llap/bucket_map_join_tez1.q.out | 1602 +
.../llap/bucket_map_join_tez2.q.out | 684 +
.../results/clientpositive/llap/cbo_gby.q.out | 124 +
.../clientpositive/llap/cbo_gby_empty.q.out | 77 +
.../results/clientpositive/llap/cbo_join.q.out | 15028 +++++++
.../results/clientpositive/llap/cbo_limit.q.out | 90 +
.../clientpositive/llap/cbo_semijoin.q.out | 440 +
.../clientpositive/llap/cbo_simple_select.q.out | 755 +
.../results/clientpositive/llap/cbo_stats.q.out | 14 +
.../clientpositive/llap/cbo_subq_exists.q.out | 297 +
.../clientpositive/llap/cbo_subq_in.q.out | 151 +
.../clientpositive/llap/cbo_subq_not_in.q.out | 365 +
.../clientpositive/llap/cbo_udf_udaf.q.out | 125 +
.../results/clientpositive/llap/cbo_union.q.out | 920 +
.../results/clientpositive/llap/cbo_views.q.out | 237 +
.../clientpositive/llap/cbo_windowing.q.out | 293 +
.../clientpositive/llap/constprog_dpp.q.out | 113 +
.../llap/correlationoptimizer1.q.out | 3084 ++
.../results/clientpositive/llap/count.q.out | 298 +
.../llap/create_merge_compressed.q.out | 138 +
.../clientpositive/llap/cross_join.q.out | 214 +
.../llap/cross_product_check_1.q.out | 575 +
.../llap/cross_product_check_2.q.out | 534 +
.../test/results/clientpositive/llap/ctas.q.out | 930 +
.../llap/custom_input_output_format.q.out | 102 +
.../llap/delete_all_non_partitioned.q.out | 52 +
.../llap/delete_all_partitioned.q.out | 86 +
.../clientpositive/llap/delete_orig_table.q.out | 61 +
.../clientpositive/llap/delete_tmp_table.q.out | 60 +
.../llap/delete_where_no_match.q.out | 62 +
.../llap/delete_where_non_partitioned.q.out | 61 +
.../llap/delete_where_partitioned.q.out | 105 +
.../llap/delete_whole_partition.q.out | 92 +
.../llap/disable_merge_for_bucketing.q.out | 502 +
.../llap/dynamic_partition_pruning.q.out | 5341 +++
.../llap/dynamic_partition_pruning_2.q.out | 1114 +
.../llap/dynpart_sort_opt_vectorization.q.out | 2616 ++
.../llap/dynpart_sort_optimization.q.out | 2401 ++
.../llap/dynpart_sort_optimization2.q.out | 1844 +
.../clientpositive/llap/enforce_order.q.out | 80 +
.../clientpositive/llap/explainuser_1.q.out | 8937 +++++
.../clientpositive/llap/explainuser_2.q.out | 5521 +++
.../clientpositive/llap/explainuser_3.q.out | 522 +
.../llap/filter_join_breaktask.q.out | 445 +
.../llap/filter_join_breaktask2.q.out | 272 +
.../results/clientpositive/llap/groupby1.q.out | 428 +
.../results/clientpositive/llap/groupby2.q.out | 133 +
.../results/clientpositive/llap/groupby3.q.out | 158 +
.../results/clientpositive/llap/having.q.out | 1290 +
.../llap/hybridgrace_hashjoin_1.q.out | 1617 +
.../llap/hybridgrace_hashjoin_2.q.out | 1477 +
.../llap/identity_project_remove_skip.q.out | 124 +
.../results/clientpositive/llap/insert1.q.out | 445 +
.../llap/insert_acid_dynamic_partition.q.out | 48 +
.../llap/insert_acid_not_bucketed.q.out | 36 +
.../clientpositive/llap/insert_into1.q.out | 381 +
.../clientpositive/llap/insert_into2.q.out | 440 +
.../clientpositive/llap/insert_orig_table.q.out | 80 +
.../llap/insert_update_delete.q.out | 78 +
.../llap/insert_values_acid_not_bucketed.q.out | 28 +
.../insert_values_dynamic_partitioned.q.out | 45 +
.../llap/insert_values_non_partitioned.q.out | 70 +
.../llap/insert_values_orig_table.q.out | 82 +
.../llap/insert_values_partitioned.q.out | 66 +
.../llap/insert_values_tmp_table.q.out | 36 +
.../clientpositive/llap/join0.q.java1.7.out | 242 +
.../clientpositive/llap/join0.q.java1.8.out | 242 +
.../results/clientpositive/llap/join1.q.out | 1158 +
.../clientpositive/llap/join_nullsafe.q.out | 1667 +
.../clientpositive/llap/leftsemijoin.q.out | 114 +
.../clientpositive/llap/limit_pushdown.q.out | 1487 +
.../clientpositive/llap/llapdecider.q.out | 1195 +
.../clientpositive/llap/load_dyn_part1.q.out | 2215 ++
.../clientpositive/llap/load_dyn_part2.q.out | 2152 +
.../clientpositive/llap/load_dyn_part3.q.out | 2138 +
.../clientpositive/llap/lvj_mapjoin.q.out | 296 +
.../clientpositive/llap/mapjoin_decimal.q.out | 393 +
.../clientpositive/llap/mapjoin_mapjoin.q.out | 821 +
.../clientpositive/llap/mapreduce1.q.out | 621 +
.../clientpositive/llap/mapreduce2.q.out | 616 +
.../results/clientpositive/llap/merge1.q.out | 596 +
.../results/clientpositive/llap/merge2.q.out | 596 +
.../results/clientpositive/llap/mergejoin.q.out | 3150 ++
.../llap/metadata_only_queries.q.out | 504 +
.../metadata_only_queries_with_filters.q.out | 224 +
.../clientpositive/llap/metadataonly1.q.out | 2050 +
.../test/results/clientpositive/llap/mrr.q.out | 2294 ++
.../clientpositive/llap/optimize_nullscan.q.out | 2159 +
.../clientpositive/llap/orc_analyze.q.out | 1726 +
.../results/clientpositive/llap/orc_llap.q.out | 1013 +
.../clientpositive/llap/orc_merge1.q.out | 500 +
.../clientpositive/llap/orc_merge2.q.out | 231 +
.../clientpositive/llap/orc_merge3.q.out | 170 +
.../clientpositive/llap/orc_merge4.q.out | 186 +
.../clientpositive/llap/orc_merge5.q.out | 344 +
.../clientpositive/llap/orc_merge6.q.out | 518 +
.../clientpositive/llap/orc_merge7.q.out | 629 +
.../clientpositive/llap/orc_merge8.q.out | 130 +
.../clientpositive/llap/orc_merge9.q.out | 186 +
.../llap/orc_merge_incompat1.q.out | 245 +
.../llap/orc_merge_incompat2.q.out | 375 +
.../clientpositive/llap/orc_ppd_basic.q.out | 701 +
.../llap/orc_vectorization_ppd.q.out | 288 +
.../results/clientpositive/llap/parallel.q.out | 1444 +
.../test/results/clientpositive/llap/ptf.q.out | 4895 +++
.../clientpositive/llap/ptf_matchpath.q.out | 403 +
.../clientpositive/llap/ptf_streaming.q.out | 2640 ++
.../results/clientpositive/llap/sample1.q.out | 727 +
.../clientpositive/llap/script_env_var1.q.out | 18 +
.../clientpositive/llap/script_env_var2.q.out | 16 +
.../clientpositive/llap/script_pipe.q.out | 126 +
.../clientpositive/llap/scriptfile1.q.out | 53 +
.../llap/selectDistinctStar.q.out | 4910 +++
.../llap/select_dummy_source.q.out | 229 +
.../llap/show_create_database.q.out | 19 +
.../results/clientpositive/llap/skewjoin.q.out | 1195 +
.../clientpositive/llap/stats_counter.q.out | 102 +
.../llap/stats_counter_partitioned.q.out | 465 +
.../clientpositive/llap/stats_noscan_1.q.out | 520 +
.../clientpositive/llap/stats_only_null.q.out | 422 +
.../clientpositive/llap/subquery_exists.q.out | 214 +
.../clientpositive/llap/subquery_in.q.out | 961 +
.../clientpositive/llap/temp_table.q.out | 469 +
.../llap/tez_bmj_schema_evolution.q.out | 2214 ++
.../results/clientpositive/llap/tez_dml.q.out | 1526 +
.../llap/tez_dynpart_hashjoin_1.q.out | 817 +
.../llap/tez_dynpart_hashjoin_2.q.out | 579 +
.../clientpositive/llap/tez_fsstat.q.out | 102 +
...tez_insert_overwrite_local_directory_1.q.out | 20 +
.../results/clientpositive/llap/tez_join.q.out | 150 +
.../clientpositive/llap/tez_join_hash.q.out | 980 +
.../llap/tez_join_result_complex.q.out | 2163 +
.../clientpositive/llap/tez_join_tests.q.out | 2227 ++
.../clientpositive/llap/tez_joins_explain.q.out | 715 +
.../clientpositive/llap/tez_multi_union.q.out | 833 +
.../llap/tez_schema_evolution.q.out | 114 +
.../clientpositive/llap/tez_self_join.q.out | 210 +
.../results/clientpositive/llap/tez_smb_1.q.out | 616 +
.../clientpositive/llap/tez_smb_main.q.out | 1422 +
.../results/clientpositive/llap/tez_union.q.out | 1438 +
.../clientpositive/llap/tez_union2.q.out | 820 +
.../clientpositive/llap/tez_union_decimal.q.out | 101 +
.../llap/tez_union_dynamic_partition.q.out | 158 +
.../llap/tez_union_group_by.q.out | 410 +
.../llap/tez_union_multiinsert.q.out | 4399 +++
.../llap/tez_vector_dynpart_hashjoin_1.q.out | 817 +
.../llap/tez_vector_dynpart_hashjoin_2.q.out | 579 +
.../clientpositive/llap/transform1.q.out | 138 +
.../clientpositive/llap/transform2.q.out | 11 +
.../clientpositive/llap/transform_ppr1.q.out | 569 +
.../clientpositive/llap/transform_ppr2.q.out | 475 +
.../results/clientpositive/llap/union2.q.out | 104 +
.../results/clientpositive/llap/union3.q.out | 251 +
.../results/clientpositive/llap/union4.q.out | 175 +
.../results/clientpositive/llap/union5.q.out | 154 +
.../results/clientpositive/llap/union6.q.out | 172 +
.../results/clientpositive/llap/union7.q.out | 150 +
.../results/clientpositive/llap/union8.q.out | 1601 +
.../results/clientpositive/llap/union9.q.out | 130 +
.../clientpositive/llap/unionDistinct_1.q.out | 16453 ++++++++
.../clientpositive/llap/unionDistinct_2.q.out | 545 +
.../clientpositive/llap/union_fast_stats.q.out | 526 +
.../clientpositive/llap/union_view.q.out | 1209 +
.../llap/update_after_multiple_inserts.q.out | 78 +
.../llap/update_all_non_partitioned.q.out | 62 +
.../llap/update_all_partitioned.q.out | 106 +
.../clientpositive/llap/update_all_types.q.out | 196 +
.../clientpositive/llap/update_orig_table.q.out | 62 +
.../clientpositive/llap/update_tmp_table.q.out | 62 +
.../clientpositive/llap/update_two_cols.q.out | 63 +
.../llap/update_where_no_match.q.out | 62 +
.../llap/update_where_non_partitioned.q.out | 62 +
.../llap/update_where_partitioned.q.out | 106 +
.../clientpositive/llap/vector_acid3.q.out | 31 +
.../llap/vector_aggregate_9.q.out | 174 +
.../llap/vector_auto_smb_mapjoin_14.q.out | 1918 +
.../clientpositive/llap/vector_between_in.q.out | 691 +
.../llap/vector_binary_join_groupby.q.out | 305 +
.../clientpositive/llap/vector_bucket.q.out | 104 +
.../llap/vector_cast_constant.q.java1.7.out | 217 +
.../llap/vector_cast_constant.q.java1.8.out | 217 +
.../clientpositive/llap/vector_char_2.q.out | 292 +
.../clientpositive/llap/vector_char_4.q.out | 175 +
.../clientpositive/llap/vector_char_cast.q.out | 35 +
.../llap/vector_char_mapjoin1.q.out | 470 +
.../llap/vector_char_simple.q.out | 342 +
.../clientpositive/llap/vector_coalesce.q.out | 362 +
.../clientpositive/llap/vector_coalesce_2.q.out | 304 +
.../llap/vector_count_distinct.q.out | 1381 +
.../clientpositive/llap/vector_data_types.q.out | 285 +
.../clientpositive/llap/vector_date_1.q.out | 719 +
.../clientpositive/llap/vector_decimal_1.q.out | 591 +
.../llap/vector_decimal_10_0.q.out | 112 +
.../clientpositive/llap/vector_decimal_2.q.out | 1658 +
.../clientpositive/llap/vector_decimal_3.q.out | 390 +
.../clientpositive/llap/vector_decimal_4.q.out | 250 +
.../clientpositive/llap/vector_decimal_5.q.out | 239 +
.../clientpositive/llap/vector_decimal_6.q.out | 303 +
.../llap/vector_decimal_aggregate.q.out | 232 +
.../llap/vector_decimal_cast.q.out | 41 +
.../llap/vector_decimal_expressions.q.out | 96 +
.../llap/vector_decimal_mapjoin.q.out | 264 +
.../llap/vector_decimal_math_funcs.q.out | 192 +
.../llap/vector_decimal_precision.q.out | 672 +
.../llap/vector_decimal_round.q.out | 460 +
.../llap/vector_decimal_round_2.q.out | 500 +
.../llap/vector_decimal_trailing.q.out | 121 +
.../llap/vector_decimal_udf.q.out | 2756 ++
.../llap/vector_decimal_udf2.q.out | 188 +
.../clientpositive/llap/vector_distinct_2.q.out | 1870 +
.../clientpositive/llap/vector_elt.q.out | 121 +
.../clientpositive/llap/vector_groupby_3.q.out | 1873 +
.../llap/vector_groupby_reduce.q.out | 1882 +
.../llap/vector_grouping_sets.q.out | 269 +
.../clientpositive/llap/vector_if_expr.q.out | 82 +
.../clientpositive/llap/vector_inner_join.q.out | 806 +
.../clientpositive/llap/vector_interval_1.q.out | 822 +
.../clientpositive/llap/vector_interval_2.q.out | 1620 +
.../llap/vector_interval_mapjoin.q.out | 281 +
.../clientpositive/llap/vector_join30.q.out | 1375 +
.../llap/vector_join_filters.q.out | 222 +
.../clientpositive/llap/vector_join_nulls.q.out | 195 +
.../llap/vector_left_outer_join.q.out | 141 +
.../llap/vector_left_outer_join2.q.out | 559 +
.../llap/vector_leftsemi_mapjoin.q.out | 13973 +++++++
.../llap/vector_mapjoin_reduce.q.out | 319 +
.../llap/vector_mr_diff_schema_alias.q.out | 383 +
.../llap/vector_multi_insert.q.out | 233 +
.../llap/vector_non_string_partition.q.out | 182 +
.../llap/vector_null_projection.q.out | 186 +
.../llap/vector_nullsafe_join.q.out | 1210 +
.../clientpositive/llap/vector_orderby_5.q.out | 189 +
.../llap/vector_outer_join0.q.out | 232 +
.../llap/vector_outer_join1.q.out | 613 +
.../llap/vector_outer_join2.q.out | 316 +
.../llap/vector_outer_join3.q.out | 609 +
.../llap/vector_outer_join4.q.out | 982 +
.../llap/vector_outer_join5.q.out | 1330 +
.../llap/vector_partition_diff_num_cols.q.out | 614 +
.../llap/vector_partitioned_date_time.q.out | 2047 +
.../llap/vector_reduce_groupby_decimal.q.out | 201 +
.../llap/vector_string_concat.q.out | 415 +
.../clientpositive/llap/vector_varchar_4.q.out | 175 +
.../llap/vector_varchar_mapjoin1.q.out | 454 +
.../llap/vector_varchar_simple.q.out | 342 +
.../clientpositive/llap/vectorization_0.q.out | 1099 +
.../clientpositive/llap/vectorization_1.q.out | 49 +
.../clientpositive/llap/vectorization_10.q.out | 298 +
.../clientpositive/llap/vectorization_11.q.out | 80 +
.../clientpositive/llap/vectorization_12.q.out | 602 +
.../clientpositive/llap/vectorization_13.q.out | 510 +
.../clientpositive/llap/vectorization_14.q.out | 836 +
.../clientpositive/llap/vectorization_15.q.out | 253 +
.../clientpositive/llap/vectorization_16.q.out | 671 +
.../clientpositive/llap/vectorization_17.q.out | 507 +
.../clientpositive/llap/vectorization_2.q.out | 53 +
.../clientpositive/llap/vectorization_3.q.out | 59 +
.../clientpositive/llap/vectorization_4.q.out | 53 +
.../clientpositive/llap/vectorization_5.q.out | 47 +
.../clientpositive/llap/vectorization_6.q.out | 1624 +
.../clientpositive/llap/vectorization_7.q.out | 380 +
.../clientpositive/llap/vectorization_8.q.out | 354 +
.../clientpositive/llap/vectorization_9.q.out | 671 +
.../llap/vectorization_decimal_date.q.out | 51 +
.../llap/vectorization_div0.q.out | 485 +
.../llap/vectorization_limit.q.out | 554 +
.../llap/vectorization_nested_udf.q.out | 9 +
.../clientpositive/llap/vectorization_not.q.out | 58 +
.../llap/vectorization_part.q.out | 72 +
.../llap/vectorization_part_project.q.out | 123 +
.../llap/vectorization_pushdown.q.out | 71 +
.../llap/vectorization_short_regress.q.out | 3414 ++
.../llap/vectorized_bucketmapjoin1.q.out | 376 +
.../clientpositive/llap/vectorized_case.q.out | 95 +
.../clientpositive/llap/vectorized_casts.q.out | 370 +
.../llap/vectorized_context.q.out | 332 +
.../llap/vectorized_date_funcs.q.out | 1019 +
.../llap/vectorized_distinct_gby.q.out | 172 +
.../vectorized_dynamic_partition_pruning.q.out | 5341 +++
.../llap/vectorized_mapjoin.q.out | 114 +
.../llap/vectorized_math_funcs.q.out | 247 +
.../llap/vectorized_nested_mapjoin.q.out | 136 +
.../llap/vectorized_parquet.q.out | 325 +
.../llap/vectorized_parquet_types.q.out | 349 +
.../clientpositive/llap/vectorized_ptf.q.out | 8992 +++++
.../llap/vectorized_rcfile_columnar.q.out | 62 +
.../llap/vectorized_shufflejoin.q.out | 132 +
.../llap/vectorized_string_funcs.q.out | 123 +
.../llap/vectorized_timestamp_funcs.q.out | 883 +
.../llap/vectorized_timestamp_ints_casts.q.out | 234 +
.../clientpositive/llap_partitioned.q.out | 1999 +
.../clientpositive/llap_uncompressed.q.out | 228 +
.../clientpositive/load_non_hdfs_path.q.out | 16 +
.../results/clientpositive/load_orc_part.q.out | 26 +
.../clientpositive/louter_join_ppr.q.out | 4 +
.../clientpositive/mapjoin_mapjoin.q.out | 1 +
ql/src/test/results/clientpositive/merge3.q.out | 12 +-
.../clientpositive/metadata_only_queries.q.out | 158 +
.../results/clientpositive/metadataonly1.q.out | 97 +-
.../results/clientpositive/nonmr_fetch.q.out | 78 +-
.../test/results/clientpositive/nullMap.q.out | 46 +
.../clientpositive/optimize_nullscan.q.out | 116 +-
.../results/clientpositive/orc_analyze.q.out | 46 +-
.../results/clientpositive/orc_file_dump.q.out | 18 +-
.../clientpositive/orc_int_type_promotion.q.out | 6 +-
.../test/results/clientpositive/orc_llap.q.out | 1095 +
.../clientpositive/outer_join_ppr.q.java1.7.out | 2 +
.../clientpositive/parallel_orderby.q.out | 8 +-
.../parquet_mixed_partition_formats2.q.out | 99 +
.../clientpositive/parquet_ppd_boolean.q.out | 28 +-
.../clientpositive/parquet_ppd_char.q.out | 84 +-
.../clientpositive/parquet_ppd_date.q.out | 112 +-
.../clientpositive/parquet_ppd_decimal.q.out | 224 +-
.../clientpositive/parquet_ppd_timestamp.q.out | 112 +-
.../clientpositive/parquet_ppd_varchar.q.out | 84 +-
ql/src/test/results/clientpositive/pcr.q.out | 37 +-
.../results/clientpositive/pointlookup2.q.out | 25 +-
.../results/clientpositive/pointlookup3.q.out | 23 +-
.../clientpositive/ppd_join_filter.q.out | 12 +-
.../results/clientpositive/ppd_union_view.q.out | 8 +-
ql/src/test/results/clientpositive/ppd_vc.q.out | 4 +-
.../clientpositive/ppr_allchildsarenull.q.out | 2 +
.../test/results/clientpositive/push_or.q.out | 1 +
.../query_result_fileformat.q.out | 6 +-
.../clientpositive/rand_partitionpruner1.q.out | 1 +
.../clientpositive/rand_partitionpruner2.q.out | 4 +-
.../results/clientpositive/regexp_extract.q.out | 2 +
.../results/clientpositive/remote_script.q.out | 8 +-
.../clientpositive/router_join_ppr.q.out | 4 +
.../test/results/clientpositive/sample1.q.out | 4 +-
.../test/results/clientpositive/sample10.q.out | 3 +-
.../test/results/clientpositive/sample2.q.out | 4 +-
.../test/results/clientpositive/sample4.q.out | 4 +-
.../test/results/clientpositive/sample5.q.out | 4 +-
.../test/results/clientpositive/sample6.q.out | 11 +-
.../test/results/clientpositive/sample7.q.out | 4 +-
.../test/results/clientpositive/sample8.q.out | 1 +
.../test/results/clientpositive/sample9.q.out | 1 +
.../results/clientpositive/scriptfile1.q.out | 6 +
.../clientpositive/select_dummy_source.q.out | 38 +-
.../results/clientpositive/selectindate.q.out | 70 +
.../clientpositive/serde_user_properties.q.out | 3 +
.../test/results/clientpositive/show_conf.q.out | 6 +-
.../clientpositive/show_create_database.q.out | 19 +
.../results/clientpositive/show_functions.q.out | 1 +
.../clientpositive/skewjoin_onesideskew.q.out | 212 +
.../results/clientpositive/smb_mapjoin9.q.out | 2 +
.../results/clientpositive/smb_mapjoin_11.q.out | 2 +
.../results/clientpositive/smb_mapjoin_13.q.out | 2 +
.../results/clientpositive/smb_mapjoin_15.q.out | 4 +
.../clientpositive/sort_merge_join_desc_5.q.out | 1 +
.../clientpositive/sort_merge_join_desc_6.q.out | 1 +
.../clientpositive/sort_merge_join_desc_7.q.out | 1 +
.../spark/auto_join_reordering_values.q.out | 1 +
.../spark/auto_sortmerge_join_1.q.out | 3 +
.../spark/auto_sortmerge_join_12.q.out | 1 +
.../spark/auto_sortmerge_join_2.q.out | 2 +
.../spark/auto_sortmerge_join_3.q.out | 3 +
.../spark/auto_sortmerge_join_4.q.out | 3 +
.../spark/auto_sortmerge_join_5.q.out | 3 +
.../spark/auto_sortmerge_join_7.q.out | 3 +
.../spark/auto_sortmerge_join_8.q.out | 3 +
.../spark/bucket_map_join_1.q.out | 9 +-
.../spark/bucket_map_join_2.q.out | 9 +-
.../spark/bucket_map_join_spark4.q.out | 2 +
.../clientpositive/spark/bucketmapjoin1.q.out | 2 +
.../clientpositive/spark/bucketmapjoin10.q.out | 1 +
.../clientpositive/spark/bucketmapjoin11.q.out | 2 +
.../clientpositive/spark/bucketmapjoin12.q.out | 2 +
.../clientpositive/spark/bucketmapjoin13.q.out | 4 +
.../clientpositive/spark/bucketmapjoin7.q.out | 1 +
.../clientpositive/spark/bucketmapjoin8.q.out | 2 +
.../clientpositive/spark/bucketmapjoin9.q.out | 2 +
.../spark/bucketmapjoin_negative3.q.out | 9 +
.../spark/column_access_stats.q.out | 46 +-
.../clientpositive/spark/cross_join.q.out | 211 +
.../clientpositive/spark/decimal_1_1.q.out | 104 +
.../spark/dynamic_rdd_cache.q.out | 28 +-
.../spark/filter_join_breaktask.q.out | 1 +
.../spark/groupby_sort_1_23.q.out | 1 +
.../spark/groupby_sort_skew_1_23.q.out | 1 +
.../clientpositive/spark/insert_into1.q.out | 238 +
.../clientpositive/spark/insert_into2.q.out | 75 +
.../spark/join_cond_pushdown_unqual1.q.out | 12 +-
.../spark/join_cond_pushdown_unqual3.q.out | 18 +-
.../spark/join_cond_pushdown_unqual4.q.out | 2 +-
.../spark/join_filters_overlap.q.out | 5 +
.../clientpositive/spark/louter_join_ppr.q.out | 4 +
.../clientpositive/spark/mapjoin_mapjoin.q.out | 1 +
.../spark/metadata_only_queries.q.out | 170 +
.../spark/optimize_nullscan.q.out | 9 +
.../spark/outer_join_ppr.q.java1.7.out | 2 +
.../test/results/clientpositive/spark/pcr.q.out | 33 +-
.../clientpositive/spark/ppd_join5.q.out | 58 +-
.../clientpositive/spark/ppd_join_filter.q.out | 4 +
.../clientpositive/spark/remote_script.q.out | 8 +-
.../clientpositive/spark/router_join_ppr.q.out | 4 +
.../results/clientpositive/spark/sample10.q.out | 1 +
.../results/clientpositive/spark/sample6.q.out | 7 +
.../results/clientpositive/spark/sample8.q.out | 1 +
.../clientpositive/spark/scriptfile1.q.out | 6 +
.../clientpositive/spark/smb_mapjoin_12.q.out | 6 +-
.../clientpositive/spark/smb_mapjoin_13.q.out | 38 +-
.../clientpositive/spark/smb_mapjoin_15.q.out | 16 +-
.../clientpositive/spark/smb_mapjoin_16.q.out | 2 +-
.../results/clientpositive/spark/stats3.q.out | 2 -
.../clientpositive/spark/temp_table.q.out | 107 +
.../clientpositive/spark/transform_ppr1.q.out | 1 +
.../clientpositive/spark/transform_ppr2.q.out | 1 +
.../results/clientpositive/spark/union24.q.out | 10 +
.../results/clientpositive/spark/union34.q.out | 68 +-
.../clientpositive/spark/union_ppr.q.out | 1 +
.../spark/vector_between_in.q.out | 16 +-
.../spark/vector_cast_constant.q.java1.7.out | 2 +-
.../spark/vector_count_distinct.q.out | 4 +-
.../spark/vector_data_types.q.out | 2 +-
.../spark/vector_decimal_aggregate.q.out | 2 +-
.../spark/vector_decimal_mapjoin.q.out | 4 +-
.../spark/vector_distinct_2.q.out | 2 +-
.../clientpositive/spark/vector_groupby_3.q.out | 2 +-
.../spark/vector_left_outer_join.q.out | 8 +-
.../spark/vector_mapjoin_reduce.q.out | 4 +-
.../clientpositive/spark/vector_orderby_5.q.out | 4 +-
.../spark/vector_string_concat.q.out | 4 +-
.../clientpositive/spark/vectorization_0.q.out | 30 +-
.../clientpositive/spark/vectorization_13.q.out | 4 +-
.../clientpositive/spark/vectorization_14.q.out | 2 +-
.../clientpositive/spark/vectorization_15.q.out | 2 +-
.../clientpositive/spark/vectorization_17.q.out | 2 +-
.../spark/vectorization_div0.q.out | 4 +-
.../spark/vectorization_part_project.q.out | 2 +-
.../spark/vectorization_short_regress.q.out | 32 +-
.../spark/vectorized_mapjoin.q.out | 4 +-
.../spark/vectorized_nested_mapjoin.q.out | 8 +-
.../clientpositive/spark/vectorized_ptf.q.out | 182 +-
.../spark/vectorized_shufflejoin.q.out | 2 +-
.../spark/vectorized_timestamp_funcs.q.out | 12 +-
.../special_character_in_tabnames_1.q.out | 19550 +++++++++
.../special_character_in_tabnames_2.q.out | 304 +
ql/src/test/results/clientpositive/stats0.q.out | 4 +-
.../test/results/clientpositive/stats11.q.out | 8 +-
ql/src/test/results/clientpositive/stats3.q.out | 2 -
.../subquery_notin_having.q.java1.8.out | 86 +-
.../results/clientpositive/subquery_views.q.out | 116 +
.../results/clientpositive/temp_table.q.out | 107 +
.../temp_table_display_colstats_tbllvl.q.out | 1 +
.../tez/acid_vectorization_partition.q.out | 18 +-
.../results/clientpositive/tez/auto_join0.q.out | 1 -
.../clientpositive/tez/auto_join_nulls.q.out | 2 +-
.../tez/auto_sortmerge_join_1.q.out | 3 +
.../tez/auto_sortmerge_join_10.q.out | 57 +-
.../tez/auto_sortmerge_join_11.q.out | 4 +
.../tez/auto_sortmerge_join_12.q.out | 97 +-
.../tez/auto_sortmerge_join_2.q.out | 2 +
.../tez/auto_sortmerge_join_3.q.out | 3 +
.../tez/auto_sortmerge_join_4.q.out | 3 +
.../tez/auto_sortmerge_join_5.q.out | 3 +
.../tez/auto_sortmerge_join_6.q.out | 160 +-
.../tez/auto_sortmerge_join_7.q.out | 3 +
.../tez/auto_sortmerge_join_8.q.out | 3 +
.../tez/bucket_map_join_tez1.q.out | 236 +-
.../tez/bucket_map_join_tez2.q.out | 108 +-
.../results/clientpositive/tez/cross_join.q.out | 187 +
.../tez/cross_product_check_2.q.out | 201 +-
.../tez/dynamic_partition_pruning.q.out | 133 +-
.../tez/dynamic_partition_pruning_2.q.out | 54 +-
.../tez/dynpart_sort_opt_vectorization.q.out | 74 +-
.../tez/dynpart_sort_optimization.q.out | 12 +-
.../tez/dynpart_sort_optimization2.q.out | 16 +-
.../clientpositive/tez/explainuser_1.q.out | 125 +-
.../clientpositive/tez/explainuser_2.q.out | 1070 +-
.../clientpositive/tez/explainuser_3.q.out | 55 +-
.../clientpositive/tez/fileformat_mix.q.out | 573 -
.../tez/filter_join_breaktask.q.out | 1 +
.../tez/identity_project_remove_skip.q.out | 3 +-
.../results/clientpositive/tez/insert1.q.out | 392 +-
.../clientpositive/tez/insert_into1.q.out | 250 +
.../clientpositive/tez/insert_into2.q.out | 75 +
.../clientpositive/tez/llapdecider.q.out | 1195 +
.../clientpositive/tez/mapjoin_mapjoin.q.out | 1 +
.../results/clientpositive/tez/mergejoin.q.out | 26 +-
.../tez/metadata_only_queries.q.out | 170 +
.../metadata_only_queries_with_filters.q.out | 32 +-
.../clientpositive/tez/metadataonly1.q.out | 9 +
.../test/results/clientpositive/tez/mrr.q.out | 83 +-
.../clientpositive/tez/optimize_nullscan.q.out | 9 +
.../clientpositive/tez/orc_analyze.q.out | 46 +-
.../clientpositive/tez/scriptfile1.q.out | 6 +
.../tez/select_dummy_source.q.out | 76 +-
.../tez/show_create_database.q.out | 19 +
.../results/clientpositive/tez/temp_table.q.out | 107 +
.../tez/tez_dynpart_hashjoin_1.q.out | 10 +-
.../clientpositive/tez/tez_join_hash.q.out | 4 +-
.../tez/tez_vector_dynpart_hashjoin_1.q.out | 24 +-
.../tez/tez_vector_dynpart_hashjoin_2.q.out | 8 +-
.../clientpositive/tez/transform_ppr1.q.out | 1 +
.../clientpositive/tez/transform_ppr2.q.out | 1 +
.../clientpositive/tez/unionDistinct_1.q.out | 93 +-
.../clientpositive/tez/union_fast_stats.q.out | 526 +
.../results/clientpositive/tez/union_view.q.out | 167 +
.../clientpositive/tez/update_all_types.q.out | 4 +-
.../tez/vector_auto_smb_mapjoin_14.q.out | 1576 +
.../clientpositive/tez/vector_between_in.q.out | 16 +-
.../tez/vector_binary_join_groupby.q.out | 4 +-
.../clientpositive/tez/vector_bucket.q.out | 2 +-
.../tez/vector_cast_constant.q.java1.7.out | 2 +-
.../tez/vector_cast_constant.q.java1.8.out | 21 +-
.../tez/vector_cast_constant.q.out | 199 -
.../clientpositive/tez/vector_char_2.q.out | 8 +-
.../clientpositive/tez/vector_char_cast.q.out | 35 +
.../tez/vector_char_mapjoin1.q.out | 6 +-
.../clientpositive/tez/vector_char_simple.q.out | 6 +-
.../clientpositive/tez/vector_coalesce.q.out | 149 +-
.../clientpositive/tez/vector_coalesce_2.q.out | 2 +-
.../tez/vector_count_distinct.q.out | 4 +-
.../clientpositive/tez/vector_data_types.q.out | 2 +-
.../clientpositive/tez/vector_date_1.q.out | 12 +-
.../clientpositive/tez/vector_decimal_1.q.out | 18 +-
.../tez/vector_decimal_10_0.q.out | 2 +-
.../clientpositive/tez/vector_decimal_2.q.out | 54 +-
.../tez/vector_decimal_aggregate.q.out | 2 +-
.../tez/vector_decimal_expressions.q.out | 2 +-
.../tez/vector_decimal_round.q.out | 12 +-
.../tez/vector_decimal_round_2.q.out | 8 +-
.../clientpositive/tez/vector_decimal_udf.q.out | 8 +-
.../clientpositive/tez/vector_distinct_2.q.out | 2 +-
.../clientpositive/tez/vector_groupby_3.q.out | 2 +-
.../tez/vector_groupby_reduce.q.out | 1474 +-
.../tez/vector_grouping_sets.q.out | 2 +-
.../clientpositive/tez/vector_if_expr.q.out | 2 +-
.../clientpositive/tez/vector_interval_1.q.out | 16 +-
.../clientpositive/tez/vector_interval_2.q.out | 20 +-
.../clientpositive/tez/vector_join30.q.out | 50 +-
.../tez/vector_left_outer_join.q.out | 2 +-
.../tez/vector_left_outer_join3.q.out | 222 -
.../tez/vector_leftsemi_mapjoin.q.out | 160 +-
.../tez/vector_mapjoin_reduce.q.out | 4 +-
.../tez/vector_mr_diff_schema_alias.q.out | 6 +-
.../tez/vector_multi_insert.q.out | 8 +
.../tez/vector_non_string_partition.q.out | 4 +-
.../clientpositive/tez/vector_orderby_5.q.out | 4 +-
.../clientpositive/tez/vector_outer_join.q.out | 2204 --
.../clientpositive/tez/vector_outer_join1.q.out | 50 +-
.../clientpositive/tez/vector_outer_join2.q.out | 2 +-
.../clientpositive/tez/vector_outer_join3.q.out | 6 +-
.../clientpositive/tez/vector_outer_join4.q.out | 50 +-
.../clientpositive/tez/vector_outer_join5.q.out | 20 +-
.../tez/vector_partition_diff_num_cols.q.out | 10 +-
.../tez/vector_partitioned_date_time.q.out | 18 +-
.../tez/vector_reduce_groupby_decimal.q.out | 4 +-
.../tez/vector_string_concat.q.out | 4 +-
.../tez/vector_varchar_mapjoin1.q.out | 6 +-
.../tez/vector_varchar_simple.q.out | 6 +-
.../clientpositive/tez/vectorization_0.q.out | 30 +-
.../clientpositive/tez/vectorization_13.q.out | 4 +-
.../clientpositive/tez/vectorization_14.q.out | 2 +-
.../clientpositive/tez/vectorization_15.q.out | 2 +-
.../clientpositive/tez/vectorization_17.q.out | 2 +-
.../clientpositive/tez/vectorization_7.q.out | 4 +-
.../clientpositive/tez/vectorization_8.q.out | 4 +-
.../clientpositive/tez/vectorization_div0.q.out | 4 +-
.../tez/vectorization_limit.q.out | 16 +-
.../tez/vectorization_part_project.q.out | 2 +-
.../tez/vectorization_short_regress.q.out | 32 +-
.../tez/vectorized_date_funcs.q.out | 4 +-
.../tez/vectorized_distinct_gby.q.out | 4 +-
.../vectorized_dynamic_partition_pruning.q.out | 231 +-
.../tez/vectorized_nested_mapjoin.q.out | 2 +-
.../clientpositive/tez/vectorized_ptf.q.out | 182 +-
.../tez/vectorized_shufflejoin.q.out | 2 +-
.../tez/vectorized_timestamp_funcs.q.out | 12 +-
ql/src/test/results/clientpositive/topn.q.out | 42 +
.../results/clientpositive/transform_ppr1.q.out | 1 +
.../results/clientpositive/transform_ppr2.q.out | 1 +
.../results/clientpositive/udf_explode.q.out | 210 +-
.../results/clientpositive/udf_greatest.q.out | 70 +-
.../results/clientpositive/udf_inline.q.out | 42 +-
.../test/results/clientpositive/udf_least.q.out | 70 +-
.../results/clientpositive/udtf_explode.q.out | 156 +-
.../clientpositive/udtf_nofetchtask.q.out | 30 +
.../test/results/clientpositive/union22.q.out | 2 +-
.../test/results/clientpositive/union24.q.out | 18 +-
.../test/results/clientpositive/union36.q.out | 28 +
.../clientpositive/unionDistinct_1.q.out | 49 +-
.../clientpositive/union_fast_stats.q.out | 526 +
.../test/results/clientpositive/union_ppr.q.out | 1 +
.../clientpositive/update_all_types.q.out | 4 +-
.../vector_auto_smb_mapjoin_14.q.out | 1792 +
.../clientpositive/vector_char_cast.q.out | 35 +
.../clientpositive/vector_char_mapjoin1.q.out | 6 +-
.../clientpositive/vector_coalesce.q.out | 151 +
.../clientpositive/vector_decimal_mapjoin.q.out | 2 +-
.../clientpositive/vector_groupby_reduce.q.out | 1495 +-
.../clientpositive/vector_inner_join.q.out | 18 +-
.../vector_interval_mapjoin.q.out | 2 +-
.../clientpositive/vector_left_outer_join.q.out | 2 +-
.../vector_left_outer_join2.q.out | 8 +-
.../vector_leftsemi_mapjoin.q.out | 152 +-
.../vector_mr_diff_schema_alias.q.out | 2 +-
.../clientpositive/vector_multi_insert.q.out | 8 +
.../clientpositive/vector_nullsafe_join.q.out | 20 +-
.../results/clientpositive/vector_nvl.q.out | 233 +
.../clientpositive/vector_outer_join0.q.out | 4 +-
.../clientpositive/vector_outer_join1.q.out | 6 +-
.../clientpositive/vector_outer_join2.q.out | 2 +-
.../clientpositive/vector_outer_join3.q.out | 6 +-
.../clientpositive/vector_outer_join4.q.out | 6 +-
.../clientpositive/vector_outer_join5.q.out | 20 +-
.../clientpositive/vector_struct_in.q.out | 825 +
.../vector_varchar_mapjoin1.q.out | 6 +-
.../clientpositive/vectorization_limit.q.out | 8 +-
.../clientpositive/vectorized_context.q.out | 2 +-
.../clientpositive/vectorized_mapjoin.q.out | 2 +-
.../vectorized_nested_mapjoin.q.out | 2 +-
.../results/clientpositive/vectorized_ptf.q.out | 236 +-
.../clientpositive/windowing_windowspec2.q.out | 198 +-
ql/src/test/templates/TestCliDriver.vm | 3 +-
ql/src/test/templates/TestCompareCliDriver.vm | 4 +-
serde/if/serde.thrift | 1 +
serde/pom.xml | 90 +-
.../gen/thrift/gen-cpp/complex_constants.cpp | 2 +-
.../src/gen/thrift/gen-cpp/complex_constants.h | 2 +-
serde/src/gen/thrift/gen-cpp/complex_types.cpp | 94 +-
serde/src/gen/thrift/gen-cpp/complex_types.h | 46 +-
.../gen/thrift/gen-cpp/megastruct_constants.cpp | 2 +-
.../gen/thrift/gen-cpp/megastruct_constants.h | 2 +-
.../src/gen/thrift/gen-cpp/megastruct_types.cpp | 70 +-
serde/src/gen/thrift/gen-cpp/megastruct_types.h | 24 +-
.../src/gen/thrift/gen-cpp/serde_constants.cpp | 4 +-
serde/src/gen/thrift/gen-cpp/serde_constants.h | 3 +-
serde/src/gen/thrift/gen-cpp/serde_types.cpp | 2 +-
serde/src/gen/thrift/gen-cpp/serde_types.h | 2 +-
.../gen/thrift/gen-cpp/testthrift_constants.cpp | 2 +-
.../gen/thrift/gen-cpp/testthrift_constants.h | 2 +-
.../src/gen/thrift/gen-cpp/testthrift_types.cpp | 34 +-
serde/src/gen/thrift/gen-cpp/testthrift_types.h | 24 +-
.../hadoop/hive/serde/serdeConstants.java | 4 +-
.../hadoop/hive/serde/test/InnerStruct.java | 6 +-
.../hadoop/hive/serde/test/ThriftTestObj.java | 6 +-
.../hadoop/hive/serde2/thrift/test/Complex.java | 6 +-
.../hive/serde2/thrift/test/IntString.java | 8 +-
.../hive/serde2/thrift/test/MegaStruct.java | 16 +-
.../hive/serde2/thrift/test/MiniStruct.java | 4 +-
.../hadoop/hive/serde2/thrift/test/MyEnum.java | 2 +-
.../hive/serde2/thrift/test/PropValueUnion.java | 2 +-
.../hive/serde2/thrift/test/SetIntString.java | 4 +-
serde/src/gen/thrift/gen-php/Types.php | 4 +-
.../org/apache/hadoop/hive/serde/Types.php | 7 +-
.../src/gen/thrift/gen-py/complex/constants.py | 2 +-
serde/src/gen/thrift/gen-py/complex/ttypes.py | 44 +-
.../gen/thrift/gen-py/megastruct/constants.py | 2 +-
.../src/gen/thrift/gen-py/megastruct/ttypes.py | 50 +-
.../org_apache_hadoop_hive_serde/constants.py | 3 +-
.../org_apache_hadoop_hive_serde/ttypes.py | 2 +-
.../gen/thrift/gen-py/testthrift/constants.py | 2 +-
.../src/gen/thrift/gen-py/testthrift/ttypes.py | 8 +-
.../src/gen/thrift/gen-rb/complex_constants.rb | 2 +-
serde/src/gen/thrift/gen-rb/complex_types.rb | 2 +-
.../gen/thrift/gen-rb/megastruct_constants.rb | 2 +-
serde/src/gen/thrift/gen-rb/megastruct_types.rb | 2 +-
serde/src/gen/thrift/gen-rb/serde_constants.rb | 4 +-
serde/src/gen/thrift/gen-rb/serde_types.rb | 2 +-
.../gen/thrift/gen-rb/testthrift_constants.rb | 2 +-
serde/src/gen/thrift/gen-rb/testthrift_types.rb | 2 +-
.../hive/serde2/ColumnProjectionUtils.java | 20 +-
.../apache/hadoop/hive/serde2/WriteBuffers.java | 4 +-
.../hive/serde2/avro/AvroDeserializer.java | 2 +-
.../hadoop/hive/serde2/avro/AvroSerDe.java | 13 +-
.../hadoop/hive/serde2/avro/AvroSerdeUtils.java | 31 +-
.../hadoop/hive/serde2/avro/AvroSerializer.java | 2 -
.../BinarySortableSerDeWithEndPrefix.java | 41 +
.../hive/serde2/columnar/ColumnarSerDe.java | 6 +-
.../hive/serde2/lazy/LazySerDeParameters.java | 45 +-
.../hive/serde2/lazy/LazySimpleSerDe.java | 7 +-
.../hadoop/hive/serde2/lazy/LazyUtils.java | 32 +-
.../objectinspector/ObjectInspectorUtils.java | 34 +
.../hadoop/hive/serde2/avro/TestAvroSerde.java | 28 +-
.../hive/serde2/avro/TestAvroSerdeUtils.java | 18 +-
.../TestObjectInspectorUtils.java | 25 +
service/pom.xml | 45 +-
service/src/gen/thrift/gen-cpp/TCLIService.cpp | 1770 +-
service/src/gen/thrift/gen-cpp/TCLIService.h | 409 +-
.../thrift/gen-cpp/TCLIService_constants.cpp | 2 +-
.../gen/thrift/gen-cpp/TCLIService_constants.h | 2 +-
.../gen/thrift/gen-cpp/TCLIService_types.cpp | 1226 +-
.../src/gen/thrift/gen-cpp/TCLIService_types.h | 816 +-
service/src/gen/thrift/gen-cpp/ThriftHive.cpp | 853 +-
service/src/gen/thrift/gen-cpp/ThriftHive.h | 199 +-
.../thrift/gen-cpp/hive_service_constants.cpp | 2 +-
.../gen/thrift/gen-cpp/hive_service_constants.h | 2 +-
.../gen/thrift/gen-cpp/hive_service_types.cpp | 55 +-
.../src/gen/thrift/gen-cpp/hive_service_types.h | 26 +-
.../hadoop/hive/service/HiveClusterStatus.java | 14 +-
.../hive/service/HiveServerException.java | 6 +-
.../hadoop/hive/service/JobTrackerState.java | 2 +-
.../apache/hadoop/hive/service/ThriftHive.java | 6 +-
.../service/cli/thrift/TArrayTypeEntry.java | 6 +-
.../hive/service/cli/thrift/TBinaryColumn.java | 6 +-
.../hive/service/cli/thrift/TBoolColumn.java | 4 +-
.../hive/service/cli/thrift/TBoolValue.java | 6 +-
.../hive/service/cli/thrift/TByteColumn.java | 4 +-
.../hive/service/cli/thrift/TByteValue.java | 6 +-
.../hive/service/cli/thrift/TCLIService.java | 4 +-
.../cli/thrift/TCLIServiceConstants.java | 2 +-
.../cli/thrift/TCancelDelegationTokenReq.java | 4 +-
.../cli/thrift/TCancelDelegationTokenResp.java | 4 +-
.../service/cli/thrift/TCancelOperationReq.java | 4 +-
.../cli/thrift/TCancelOperationResp.java | 4 +-
.../service/cli/thrift/TCloseOperationReq.java | 4 +-
.../service/cli/thrift/TCloseOperationResp.java | 4 +-
.../service/cli/thrift/TCloseSessionReq.java | 4 +-
.../service/cli/thrift/TCloseSessionResp.java | 4 +-
.../apache/hive/service/cli/thrift/TColumn.java | 2 +-
.../hive/service/cli/thrift/TColumnDesc.java | 6 +-
.../hive/service/cli/thrift/TColumnValue.java | 2 +-
.../hive/service/cli/thrift/TDoubleColumn.java | 4 +-
.../hive/service/cli/thrift/TDoubleValue.java | 6 +-
.../cli/thrift/TExecuteStatementReq.java | 6 +-
.../cli/thrift/TExecuteStatementResp.java | 4 +-
.../service/cli/thrift/TFetchOrientation.java | 2 +-
.../service/cli/thrift/TFetchResultsReq.java | 8 +-
.../service/cli/thrift/TFetchResultsResp.java | 6 +-
.../service/cli/thrift/TGetCatalogsReq.java | 4 +-
.../service/cli/thrift/TGetCatalogsResp.java | 4 +-
.../hive/service/cli/thrift/TGetColumnsReq.java | 4 +-
.../service/cli/thrift/TGetColumnsResp.java | 4 +-
.../cli/thrift/TGetDelegationTokenReq.java | 4 +-
.../cli/thrift/TGetDelegationTokenResp.java | 4 +-
.../service/cli/thrift/TGetFunctionsReq.java | 4 +-
.../service/cli/thrift/TGetFunctionsResp.java | 4 +-
.../hive/service/cli/thrift/TGetInfoReq.java | 4 +-
.../hive/service/cli/thrift/TGetInfoResp.java | 4 +-
.../hive/service/cli/thrift/TGetInfoType.java | 2 +-
.../hive/service/cli/thrift/TGetInfoValue.java | 2 +-
.../cli/thrift/TGetOperationStatusReq.java | 4 +-
.../cli/thrift/TGetOperationStatusResp.java | 6 +-
.../cli/thrift/TGetResultSetMetadataReq.java | 4 +-
.../cli/thrift/TGetResultSetMetadataResp.java | 4 +-
.../hive/service/cli/thrift/TGetSchemasReq.java | 4 +-
.../service/cli/thrift/TGetSchemasResp.java | 4 +-
.../service/cli/thrift/TGetTableTypesReq.java | 4 +-
.../service/cli/thrift/TGetTableTypesResp.java | 4 +-
.../hive/service/cli/thrift/TGetTablesReq.java | 4 +-
.../hive/service/cli/thrift/TGetTablesResp.java | 4 +-
.../service/cli/thrift/TGetTypeInfoReq.java | 4 +-
.../service/cli/thrift/TGetTypeInfoResp.java | 4 +-
.../service/cli/thrift/THandleIdentifier.java | 4 +-
.../hive/service/cli/thrift/TI16Column.java | 4 +-
.../hive/service/cli/thrift/TI16Value.java | 6 +-
.../hive/service/cli/thrift/TI32Column.java | 4 +-
.../hive/service/cli/thrift/TI32Value.java | 6 +-
.../hive/service/cli/thrift/TI64Column.java | 4 +-
.../hive/service/cli/thrift/TI64Value.java | 6 +-
.../hive/service/cli/thrift/TMapTypeEntry.java | 8 +-
.../service/cli/thrift/TOpenSessionReq.java | 4 +-
.../service/cli/thrift/TOpenSessionResp.java | 4 +-
.../service/cli/thrift/TOperationHandle.java | 8 +-
.../service/cli/thrift/TOperationState.java | 2 +-
.../hive/service/cli/thrift/TOperationType.java | 2 +-
.../service/cli/thrift/TPrimitiveTypeEntry.java | 4 +-
.../service/cli/thrift/TProtocolVersion.java | 2 +-
.../cli/thrift/TRenewDelegationTokenReq.java | 4 +-
.../cli/thrift/TRenewDelegationTokenResp.java | 4 +-
.../apache/hive/service/cli/thrift/TRow.java | 4 +-
.../apache/hive/service/cli/thrift/TRowSet.java | 6 +-
.../hive/service/cli/thrift/TSessionHandle.java | 4 +-
.../apache/hive/service/cli/thrift/TStatus.java | 6 +-
.../hive/service/cli/thrift/TStatusCode.java | 2 +-
.../hive/service/cli/thrift/TStringColumn.java | 4 +-
.../hive/service/cli/thrift/TStringValue.java | 4 +-
.../service/cli/thrift/TStructTypeEntry.java | 4 +-
.../hive/service/cli/thrift/TTableSchema.java | 4 +-
.../hive/service/cli/thrift/TTypeDesc.java | 4 +-
.../hive/service/cli/thrift/TTypeEntry.java | 2 +-
.../apache/hive/service/cli/thrift/TTypeId.java | 2 +-
.../service/cli/thrift/TTypeQualifierValue.java | 2 +-
.../service/cli/thrift/TTypeQualifiers.java | 4 +-
.../service/cli/thrift/TUnionTypeEntry.java | 4 +-
.../cli/thrift/TUserDefinedTypeEntry.java | 4 +-
service/src/gen/thrift/gen-php/TCLIService.php | 3 +-
service/src/gen/thrift/gen-php/ThriftHive.php | 3 +-
service/src/gen/thrift/gen-php/Types.php | 4 +-
.../gen-py/TCLIService/TCLIService-remote | 2 +-
.../thrift/gen-py/TCLIService/TCLIService.py | 269 +-
.../gen/thrift/gen-py/TCLIService/constants.py | 2 +-
.../src/gen/thrift/gen-py/TCLIService/ttypes.py | 190 +-
.../gen-py/hive_service/ThriftHive-remote | 51 +-
.../thrift/gen-py/hive_service/ThriftHive.py | 135 +-
.../gen/thrift/gen-py/hive_service/constants.py | 2 +-
.../gen/thrift/gen-py/hive_service/ttypes.py | 20 +-
.../gen/thrift/gen-rb/hive_service_constants.rb | 2 +-
.../src/gen/thrift/gen-rb/hive_service_types.rb | 2 +-
.../src/gen/thrift/gen-rb/t_c_l_i_service.rb | 2 +-
.../thrift/gen-rb/t_c_l_i_service_constants.rb | 2 +-
.../gen/thrift/gen-rb/t_c_l_i_service_types.rb | 2 +-
service/src/gen/thrift/gen-rb/thrift_hive.rb | 2 +-
.../auth/LdapAuthenticationProviderImpl.java | 93 +-
.../cli/operation/HiveCommandOperation.java | 34 +-
.../cli/operation/LogDivertAppender.java | 2 +-
.../hive/service/cli/operation/Operation.java | 11 +
.../service/cli/operation/OperationManager.java | 11 +
.../service/cli/operation/SQLOperation.java | 33 +-
.../service/cli/session/HiveSessionImpl.java | 12 +
.../cli/session/HiveSessionImplwithUGI.java | 3 +-
.../service/cli/session/HiveSessionProxy.java | 6 +
.../service/cli/session/SessionManager.java | 6 +-
.../thrift/EmbeddedThriftBinaryCLIService.java | 2 +-
.../thrift/ThreadPoolExecutorWithOomHook.java | 55 +
.../cli/thrift/ThriftBinaryCLIService.java | 12 +-
.../service/cli/thrift/ThriftCLIService.java | 8 +-
.../cli/thrift/ThriftHttpCLIService.java | 17 +-
.../apache/hive/service/server/HiveServer2.java | 12 +-
.../hive/service/auth/TestPlainSaslHelper.java | 2 +-
.../session/TestPluggableHiveSessionImpl.java | 2 +-
.../cli/session/TestSessionGlobalInitFile.java | 2 +-
shims/0.20S/pom.xml | 63 -
.../hadoop/hive/shims/Hadoop20SShims.java | 733 -
.../apache/hadoop/hive/shims/Jetty20SShims.java | 53 -
.../apache/hadoop/mapred/WebHCatJTShim20S.java | 123 -
shims/0.23/pom.xml | 25 +-
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 106 +-
shims/aggregator/pom.xml | 6 -
shims/common/pom.xml | 4 +-
.../apache/hadoop/hive/shims/HadoopShims.java | 2 +-
.../apache/hadoop/hive/shims/ShimLoader.java | 17 +-
.../hive/thrift/HadoopThriftAuthBridge.java | 14 +-
shims/pom.xml | 1 -
shims/scheduler/pom.xml | 14 +-
.../apache/hive/spark/client/SparkClient.java | 5 +
.../hive/spark/client/SparkClientImpl.java | 5 +
.../org/apache/hive/spark/client/rpc/Rpc.java | 4 +
storage-api/pom.xml | 31 +-
.../org/apache/hadoop/hive/common/Pool.java | 32 +
.../apache/hadoop/hive/common/io/Allocator.java | 53 +
.../apache/hadoop/hive/common/io/DataCache.java | 100 +
.../apache/hadoop/hive/common/io/DiskRange.java | 102 +
.../hadoop/hive/common/io/DiskRangeList.java | 210 +
.../common/io/encoded/EncodedColumnBatch.java | 142 +
.../hive/common/io/encoded/MemoryBuffer.java | 28 +
.../hadoop/hive/common/type/HiveDecimal.java | 10 +-
.../hive/ql/exec/vector/BytesColumnVector.java | 47 +-
.../hive/ql/exec/vector/ColumnVector.java | 49 +-
.../ql/exec/vector/DecimalColumnVector.java | 59 +-
.../hive/ql/exec/vector/DoubleColumnVector.java | 37 +-
.../hive/ql/exec/vector/ListColumnVector.java | 119 +
.../hive/ql/exec/vector/LongColumnVector.java | 37 +-
.../hive/ql/exec/vector/MapColumnVector.java | 131 +
.../ql/exec/vector/MultiValuedColumnVector.java | 150 +
.../hive/ql/exec/vector/StructColumnVector.java | 124 +
.../hive/ql/exec/vector/UnionColumnVector.java | 134 +
.../hive/ql/io/sarg/SearchArgumentImpl.java | 2 +-
.../ql/exec/vector/TestListColumnVector.java | 200 +
.../ql/exec/vector/TestMapColumnVector.java | 224 +
.../ql/exec/vector/TestStructColumnVector.java | 95 +
.../ql/exec/vector/TestUnionColumnVector.java | 93 +
.../hive/ptest/execution/JIRAService.java | 187 +-
.../org/apache/hive/ptest/execution/PTest.java | 11 +-
.../hive/ptest/execution/TestCheckPhase.java | 77 +
.../ptest2/src/main/resources/batch-exec.vm | 2 +
.../hive/ptest/execution/TestJIRAService.java | 89 +-
...RAService.testErrorWithMessages.approved.txt | 20 +
...ervice.testErrorWithoutMessages.approved.txt | 14 +
.../TestJIRAService.testFailAdd.approved.txt | 21 +
.../TestJIRAService.testFailNoAdd.approved.txt | 21 +
.../TestJIRAService.testSuccessAdd.approved.txt | 16 +
...estJIRAService.testSuccessNoAdd.approved.txt | 16 +
.../ptest/execution/TestTestCheckPhase.java | 91 +
.../src/test/resources/HIVE-10761.6.patch | 2539 ++
.../src/test/resources/HIVE-11271.4.patch | 606 +
.../ptest2/src/test/resources/HIVE-9377.1.patch | 25 +
.../ptest2/src/test/resources/remove-test.patch | 33 +
.../resources/test-configuration.properties | 2 +
2138 files changed, 506231 insertions(+), 42284 deletions(-)
----------------------------------------------------------------------
[03/23] hive git commit: HIVE-11180: Enable native vectorized map
join for spark [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
new file mode 100644
index 0000000..cfc4753
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -0,0 +1,631 @@
+PREHOOK: query: -- Using cint and ctinyint in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: query: -- Using cint and ctinyint in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc1a
+PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc2a
+PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc3a
+PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc4a
+PREHOOK: query: select * from small_alltypesorc1a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+PREHOOK: query: select * from small_alltypesorc2a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc2a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+PREHOOK: query: select * from small_alltypesorc3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: select * from small_alltypesorc4a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc4a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@small_alltypesorc1a
+PREHOOK: Input: default@small_alltypesorc2a
+PREHOOK: Input: default@small_alltypesorc3a
+PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@small_alltypesorc1a
+POSTHOOK: Input: default@small_alltypesorc2a
+POSTHOOK: Input: default@small_alltypesorc3a
+POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+PREHOOK: query: select * from small_alltypesorc_a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc_a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select *
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select *
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col2 (type: int)
+ 1 _col2 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col2 (type: int)
+ 1 _col2 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select *
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select *
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL -64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL -64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL -64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL -64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL -64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL -64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL -64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL -64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL -64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select c.ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select c.ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select c.ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select c.ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain
+select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col1 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col1 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 17 Data size: 4736 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(_col0)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.ctinyint = c.ctinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+145 -8960
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
new file mode 100644
index 0000000..0015708
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -0,0 +1,327 @@
+PREHOOK: query: -- Using cint and cbigint in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: query: -- Using cint and cbigint in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc1a
+PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc2a
+PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc3a
+PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc4a
+PREHOOK: query: select * from small_alltypesorc1a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: select * from small_alltypesorc2a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc2a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+PREHOOK: query: select * from small_alltypesorc3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+NULL -13166 626923679 NULL NULL -13166.0 821UdmGbkEf4j NULL 1969-12-31 15:59:55.089 1969-12-31 16:00:15.69 true NULL
+NULL -14426 626923679 NULL NULL -14426.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.505 1969-12-31 16:00:13.309 true NULL
+NULL -14847 626923679 NULL NULL -14847.0 821UdmGbkEf4j NULL 1969-12-31 16:00:00.612 1969-12-31 15:59:43.704 true NULL
+NULL -15632 528534767 NULL NULL -15632.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:53.593 true NULL
+NULL -15830 253665376 NULL NULL -15830.0 1cGVWH7n1QU NULL 1969-12-31 16:00:02.582 1969-12-31 16:00:00.518 true NULL
+PREHOOK: query: select * from small_alltypesorc4a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc4a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+-60 -200 NULL NULL -60.0 -200.0 NULL NULL 1969-12-31 16:00:11.996 1969-12-31 15:59:55.451 NULL NULL
+-61 -7196 NULL NULL -61.0 -7196.0 NULL 8Mlns2Tl6E0g 1969-12-31 15:59:44.823 1969-12-31 15:59:58.174 NULL false
+-61 -7196 NULL NULL -61.0 -7196.0 NULL fUJIN 1969-12-31 16:00:11.842 1969-12-31 15:59:58.174 NULL false
+-62 -7196 NULL NULL -62.0 -7196.0 NULL jf1Cw6qhkNToQuud 1969-12-31 16:00:12.388 1969-12-31 15:59:58.174 NULL false
+-62 -7196 NULL NULL -62.0 -7196.0 NULL yLiOchx5PfDTFdcMduBTg 1969-12-31 16:00:02.373 1969-12-31 15:59:58.174 NULL false
+PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@small_alltypesorc1a
+PREHOOK: Input: default@small_alltypesorc2a
+PREHOOK: Input: default@small_alltypesorc3a
+PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@small_alltypesorc1a
+POSTHOOK: Input: default@small_alltypesorc2a
+POSTHOOK: Input: default@small_alltypesorc3a
+POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+PREHOOK: query: select * from small_alltypesorc_a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc_a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+-60 -200 NULL NULL -60.0 -200.0 NULL NULL 1969-12-31 16:00:11.996 1969-12-31 15:59:55.451 NULL NULL
+-61 -7196 NULL NULL -61.0 -7196.0 NULL 8Mlns2Tl6E0g 1969-12-31 15:59:44.823 1969-12-31 15:59:58.174 NULL false
+-61 -7196 NULL NULL -61.0 -7196.0 NULL fUJIN 1969-12-31 16:00:11.842 1969-12-31 15:59:58.174 NULL false
+-62 -7196 NULL NULL -62.0 -7196.0 NULL jf1Cw6qhkNToQuud 1969-12-31 16:00:12.388 1969-12-31 15:59:58.174 NULL false
+-62 -7196 NULL NULL -62.0 -7196.0 NULL yLiOchx5PfDTFdcMduBTg 1969-12-31 16:00:02.373 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+NULL -13166 626923679 NULL NULL -13166.0 821UdmGbkEf4j NULL 1969-12-31 15:59:55.089 1969-12-31 16:00:15.69 true NULL
+NULL -14426 626923679 NULL NULL -14426.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.505 1969-12-31 16:00:13.309 true NULL
+NULL -14847 626923679 NULL NULL -14847.0 821UdmGbkEf4j NULL 1969-12-31 16:00:00.612 1969-12-31 15:59:43.704 true NULL
+NULL -15632 528534767 NULL NULL -15632.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:53.593 true NULL
+NULL -15830 253665376 NULL NULL -15830.0 1cGVWH7n1QU NULL 1969-12-31 16:00:02.582 1969-12-31 16:00:00.518 true NULL
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cbigint = c.cbigint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cbigint = c.cbigint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cbigint (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col1 (type: bigint)
+ 1 _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int), cbigint (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col1
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 5561 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col1 (type: bigint)
+ 1 _col0 (type: bigint)
+ outputColumnNames: _col1
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 24 Data size: 6117 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 24 Data size: 6117 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(_col0)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cbigint = c.cbigint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cbigint = c.cbigint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+34 -26289186744
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
new file mode 100644
index 0000000..b029e1c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -0,0 +1,630 @@
+PREHOOK: query: -- Using cint and cstring1 in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: query: -- Using cint and cstring1 in test queries
+create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc1a
+PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc2a
+PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc3a
+PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc4a
+PREHOOK: query: select * from small_alltypesorc1a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc1a
+#### A masked pattern was here ####
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: select * from small_alltypesorc2a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc2a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc2a
+#### A masked pattern was here ####
+-51 NULL NULL -1731061911 -51.0 NULL Pw53BBJ yL443x2437PO5Hv1U3lCjq2D 1969-12-31 16:00:08.451 NULL true false
+-51 NULL NULL -1846191223 -51.0 NULL Ul085f84S33Xd32u x1JC58g0Ukp 1969-12-31 16:00:08.451 NULL true true
+-51 NULL NULL -1874052220 -51.0 NULL c61B47I604gymFJ sjWQS78 1969-12-31 16:00:08.451 NULL false false
+-51 NULL NULL -1927203921 -51.0 NULL 45ja5suO 42S0I0 1969-12-31 16:00:08.451 NULL true true
+-51 NULL NULL -1970551565 -51.0 NULL r2uhJH3 loXMWyrHjVeK 1969-12-31 16:00:08.451 NULL false false
+PREHOOK: query: select * from small_alltypesorc3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc3a
+#### A masked pattern was here ####
+-51 NULL -31312632 1086455747 -51.0 NULL NULL Bc7xt12568c451o64LF5 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -337975743 608681041 -51.0 NULL NULL Ih2r28o6 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -413196097 -306198070 -51.0 NULL NULL F53QcSDPpxYF1Ub 1969-12-31 16:00:08.451 NULL NULL false
+-51 NULL -591488718 803603078 -51.0 NULL NULL X616UtmmA3FHan 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -738306196 -460430946 -51.0 NULL NULL dBOqv 1969-12-31 16:00:08.451 NULL NULL false
+PREHOOK: query: select * from small_alltypesorc4a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc4a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc4a
+#### A masked pattern was here ####
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@small_alltypesorc1a
+PREHOOK: Input: default@small_alltypesorc2a
+PREHOOK: Input: default@small_alltypesorc3a
+PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from
+(select * from (select * from small_alltypesorc1a) sq1
+ union all
+ select * from (select * from small_alltypesorc2a) sq2
+ union all
+ select * from (select * from small_alltypesorc3a) sq3
+ union all
+ select * from (select * from small_alltypesorc4a) sq4) q
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@small_alltypesorc1a
+POSTHOOK: Input: default@small_alltypesorc2a
+POSTHOOK: Input: default@small_alltypesorc3a
+POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+PREHOOK: query: select * from small_alltypesorc_a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc_a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+-51 NULL -31312632 1086455747 -51.0 NULL NULL Bc7xt12568c451o64LF5 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -337975743 608681041 -51.0 NULL NULL Ih2r28o6 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -413196097 -306198070 -51.0 NULL NULL F53QcSDPpxYF1Ub 1969-12-31 16:00:08.451 NULL NULL false
+-51 NULL -591488718 803603078 -51.0 NULL NULL X616UtmmA3FHan 1969-12-31 16:00:08.451 NULL NULL true
+-51 NULL -738306196 -460430946 -51.0 NULL NULL dBOqv 1969-12-31 16:00:08.451 NULL NULL false
+-51 NULL NULL -1731061911 -51.0 NULL Pw53BBJ yL443x2437PO5Hv1U3lCjq2D 1969-12-31 16:00:08.451 NULL true false
+-51 NULL NULL -1846191223 -51.0 NULL Ul085f84S33Xd32u x1JC58g0Ukp 1969-12-31 16:00:08.451 NULL true true
+-51 NULL NULL -1874052220 -51.0 NULL c61B47I604gymFJ sjWQS78 1969-12-31 16:00:08.451 NULL false false
+-51 NULL NULL -1927203921 -51.0 NULL 45ja5suO 42S0I0 1969-12-31 16:00:08.451 NULL true true
+-51 NULL NULL -1970551565 -51.0 NULL r2uhJH3 loXMWyrHjVeK 1969-12-31 16:00:08.451 NULL false false
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cstring1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int), cstring1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col1
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+20
+PREHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cstring2 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cstring1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cstring1 (type: string), cstring2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+28
+PREHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1 and hd.cint = c.cint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1 and hd.cint = c.cint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cbigint (type: bigint), cstring2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col3 (type: string), _col1 (type: bigint)
+ 1 _col1 (type: string), _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int), cstring1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col2 (type: string), _col0 (type: int)
+ 1 _col1 (type: string), _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col3 (type: string), _col1 (type: bigint)
+ 1 _col1 (type: string), _col0 (type: bigint)
+ outputColumnNames: _col0, _col2
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col2 (type: string), _col0 (type: int)
+ 1 _col1 (type: string), _col0 (type: int)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1 and hd.cint = c.cint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.cstring1
+from small_alltypesorc_a c
+left outer join small_alltypesorc_a cd
+ on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint
+left outer join small_alltypesorc_a hd
+ on hd.cstring1 = c.cstring1 and hd.cint = c.cint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_a
+#### A masked pattern was here ####
+28
[13/23] hive git commit: HIVE-12283: Fix test failures after
HIVE-11844 [Spark Branch] (Rui via Xuefu)
Posted by xu...@apache.org.
HIVE-12283: Fix test failures after HIVE-11844 [Spark Branch] (Rui via Xuefu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/51f257af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/51f257af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/51f257af
Branch: refs/heads/master
Commit: 51f257af0d881bfb7978603e55066db6b4e7af05
Parents: 8436319
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Oct 28 05:08:07 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Oct 28 05:08:07 2015 -0700
----------------------------------------------------------------------
.../spark/vector_inner_join.q.out | 12 ++++++------
.../spark/vector_outer_join2.q.out | 20 ++++++++------------
2 files changed, 14 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/51f257af/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
index d1b775f..bf7090b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
@@ -140,17 +140,17 @@ STAGE PLANS:
alias: t1
Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: a is not null (type: boolean)
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Group By Operator
keys: _col0 (type: int)
mode: hash
outputColumnNames: _col0
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: int)
@@ -184,10 +184,10 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 2
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/51f257af/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index 0015708..38051fd 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -262,19 +262,15 @@ STAGE PLANS:
input vertices:
1 Map 4
Statistics: Num rows: 24 Data size: 6117 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col1 (type: bigint)
- outputColumnNames: _col0
- Statistics: Num rows: 24 Data size: 6117 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: count(), sum(_col0)
- mode: hash
- outputColumnNames: _col0, _col1
+ Group By Operator
+ aggregations: count(), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
Local Work:
Map Reduce Local Work
Execution mode: vectorized
[12/23] hive git commit: HIVE-11473: Upgrade Spark dependency to 1.5
[Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
HIVE-11473: Upgrade Spark dependency to 1.5 [Spark Branch] (Rui reviewed by Xuefu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/84363196
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/84363196
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/84363196
Branch: refs/heads/master
Commit: 84363196bda0f6f2c03884cf633913c79dec30a3
Parents: 13eb409
Author: Rui Li <ru...@intel.com>
Authored: Thu Oct 22 13:58:46 2015 +0800
Committer: Rui Li <ru...@intel.com>
Committed: Thu Oct 22 13:59:30 2015 +0800
----------------------------------------------------------------------
pom.xml | 12 +--
.../spark/status/impl/JobMetricsListener.java | 88 +-------------------
.../apache/hive/spark/client/RemoteDriver.java | 60 +------------
3 files changed, 5 insertions(+), 155 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/84363196/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index b55e86a..0cd4238 100644
--- a/pom.xml
+++ b/pom.xml
@@ -161,7 +161,7 @@
<ST4.version>4.0.4</ST4.version>
<tez.version>0.5.2</tez.version>
<super-csv.version>2.2.0</super-csv.version>
- <spark.version>1.4.0</spark.version>
+ <spark.version>1.5.0</spark.version>
<scala.binary.version>2.10</scala.binary.version>
<scala.version>2.10.4</scala.version>
<tempus-fugit.version>1.1</tempus-fugit.version>
@@ -222,16 +222,6 @@
<enabled>false</enabled>
</snapshots>
</repository>
- <repository>
- <id>spark-1.3</id>
- <url>https://s3-us-west-1.amazonaws.com/hive-spark/maven2/spark_2.10-1.3-rc1/</url>
- <releases>
- <enabled>true</enabled>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
</repositories>
<!-- Hadoop dependency management is done at the bottom under profiles -->
http://git-wip-us.apache.org/repos/asf/hive/blob/84363196/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
index 51772cd..52f4b9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
@@ -23,29 +23,15 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.spark.JavaSparkListener;
import org.apache.spark.executor.TaskMetrics;
-import org.apache.spark.scheduler.SparkListener;
-import org.apache.spark.scheduler.SparkListenerApplicationEnd;
-import org.apache.spark.scheduler.SparkListenerApplicationStart;
-import org.apache.spark.scheduler.SparkListenerBlockManagerAdded;
-import org.apache.spark.scheduler.SparkListenerBlockManagerRemoved;
-import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate;
-import org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate;
-import org.apache.spark.scheduler.SparkListenerJobEnd;
import org.apache.spark.scheduler.SparkListenerJobStart;
-import org.apache.spark.scheduler.SparkListenerStageCompleted;
-import org.apache.spark.scheduler.SparkListenerStageSubmitted;
import org.apache.spark.scheduler.SparkListenerTaskEnd;
-import org.apache.spark.scheduler.SparkListenerTaskGettingResult;
-import org.apache.spark.scheduler.SparkListenerTaskStart;
-import org.apache.spark.scheduler.SparkListenerUnpersistRDD;
-import org.apache.spark.scheduler.SparkListenerExecutorRemoved;
-import org.apache.spark.scheduler.SparkListenerExecutorAdded;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-public class JobMetricsListener implements SparkListener {
+public class JobMetricsListener extends JavaSparkListener {
private static final Log LOG = LogFactory.getLog(JobMetricsListener.class);
@@ -54,36 +40,6 @@ public class JobMetricsListener implements SparkListener {
private final Map<Integer, Map<String, List<TaskMetrics>>> allJobMetrics = Maps.newHashMap();
@Override
- public void onExecutorRemoved(SparkListenerExecutorRemoved removed) {
-
- }
-
- @Override
- public void onExecutorAdded(SparkListenerExecutorAdded added) {
-
- }
-
- @Override
- public void onStageCompleted(SparkListenerStageCompleted stageCompleted) {
-
- }
-
- @Override
- public void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted) {
-
- }
-
- @Override
- public void onTaskStart(SparkListenerTaskStart taskStart) {
-
- }
-
- @Override
- public void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult) {
-
- }
-
- @Override
public synchronized void onTaskEnd(SparkListenerTaskEnd taskEnd) {
int stageId = taskEnd.stageId();
int stageAttemptId = taskEnd.stageAttemptId();
@@ -119,46 +75,6 @@ public class JobMetricsListener implements SparkListener {
jobIdToStageId.put(jobId, intStageIds);
}
- @Override
- public synchronized void onJobEnd(SparkListenerJobEnd jobEnd) {
-
- }
-
- @Override
- public void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate) {
-
- }
-
- @Override
- public void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded) {
-
- }
-
- @Override
- public void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved) {
-
- }
-
- @Override
- public void onUnpersistRDD(SparkListenerUnpersistRDD unpersistRDD) {
-
- }
-
- @Override
- public void onApplicationStart(SparkListenerApplicationStart applicationStart) {
-
- }
-
- @Override
- public void onApplicationEnd(SparkListenerApplicationEnd applicationEnd) {
-
- }
-
- @Override
- public void onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate) {
-
- }
-
public synchronized Map<String, List<TaskMetrics>> getJobMetric(int jobId) {
return allJobMetrics.get(jobId);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/84363196/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java b/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java
index b77c9e8..f5b1e48 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/RemoteDriver.java
@@ -43,26 +43,13 @@ import org.apache.hive.spark.client.metrics.Metrics;
import org.apache.hive.spark.client.rpc.Rpc;
import org.apache.hive.spark.client.rpc.RpcConfiguration;
import org.apache.hive.spark.counter.SparkCounters;
+import org.apache.spark.JavaSparkListener;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaFutureAction;
import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.scheduler.SparkListener;
-import org.apache.spark.scheduler.SparkListenerApplicationEnd;
-import org.apache.spark.scheduler.SparkListenerApplicationStart;
-import org.apache.spark.scheduler.SparkListenerBlockManagerAdded;
-import org.apache.spark.scheduler.SparkListenerBlockManagerRemoved;
-import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate;
-import org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate;
import org.apache.spark.scheduler.SparkListenerJobEnd;
import org.apache.spark.scheduler.SparkListenerJobStart;
-import org.apache.spark.scheduler.SparkListenerStageCompleted;
-import org.apache.spark.scheduler.SparkListenerStageSubmitted;
import org.apache.spark.scheduler.SparkListenerTaskEnd;
-import org.apache.spark.scheduler.SparkListenerTaskGettingResult;
-import org.apache.spark.scheduler.SparkListenerTaskStart;
-import org.apache.spark.scheduler.SparkListenerUnpersistRDD;
-import org.apache.spark.scheduler.SparkListenerExecutorRemoved;
-import org.apache.spark.scheduler.SparkListenerExecutorAdded;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -438,21 +425,11 @@ public class RemoteDriver {
}
- private class ClientListener implements SparkListener {
+ private class ClientListener extends JavaSparkListener {
private final Map<Integer, Integer> stageToJobId = Maps.newHashMap();
@Override
- public void onExecutorRemoved(SparkListenerExecutorRemoved removed) {
-
- }
-
- @Override
- public void onExecutorAdded(SparkListenerExecutorAdded added) {
-
- }
-
- @Override
public void onJobStart(SparkListenerJobStart jobStart) {
synchronized (stageToJobId) {
for (int i = 0; i < jobStart.stageIds().length(); i++) {
@@ -500,39 +477,6 @@ public class RemoteDriver {
}
}
- @Override
- public void onStageCompleted(SparkListenerStageCompleted stageCompleted) { }
-
- @Override
- public void onStageSubmitted(SparkListenerStageSubmitted stageSubmitted) { }
-
- @Override
- public void onTaskStart(SparkListenerTaskStart taskStart) { }
-
- @Override
- public void onTaskGettingResult(SparkListenerTaskGettingResult taskGettingResult) { }
-
- @Override
- public void onEnvironmentUpdate(SparkListenerEnvironmentUpdate environmentUpdate) { }
-
- @Override
- public void onBlockManagerAdded(SparkListenerBlockManagerAdded blockManagerAdded) { }
-
- @Override
- public void onBlockManagerRemoved(SparkListenerBlockManagerRemoved blockManagerRemoved) { }
-
- @Override
- public void onUnpersistRDD(SparkListenerUnpersistRDD unpersistRDD) { }
-
- @Override
- public void onApplicationStart(SparkListenerApplicationStart applicationStart) { }
-
- @Override
- public void onApplicationEnd(SparkListenerApplicationEnd applicationEnd) { }
-
- @Override
- public void onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate executorMetricsUpdate) { }
-
/**
* Returns the client job ID for the given Spark job ID.
*
[09/23] hive git commit: HIVE-12091: Merge file doesn't work for ORC
table when running on Spark. [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
new file mode 100644
index 0000000..d092e6a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
@@ -0,0 +1,240 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 5 files total
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 5 files total
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 6 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
new file mode 100644
index 0000000..90a8f59
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
@@ -0,0 +1,370 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 4 items
+#### A masked pattern was here ####
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ st 80.0
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
[02/23] hive git commit: HIVE-11180: Enable native vectorized map
join for spark [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
new file mode 100644
index 0000000..182dbb0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -0,0 +1,1000 @@
+PREHOOK: query: -- Using cint and ctinyint in test queries
+create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc1b
+POSTHOOK: query: -- Using cint and ctinyint in test queries
+create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc1b
+PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc2b
+POSTHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc2b
+PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc3b
+POSTHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc3b
+PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc4b
+POSTHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc4b
+PREHOOK: query: select * from small_alltypesorc1b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc1b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc1b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc1b
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+PREHOOK: query: select * from small_alltypesorc2b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc2b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc2b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc2b
+#### A masked pattern was here ####
+-64 -200 NULL -1809444706 -64.0 -200.0 NULL B87YVb3UASqg 1969-12-31 16:00:10.858 1969-12-31 15:59:55.451 NULL true
+-64 -200 NULL 2118653994 -64.0 -200.0 NULL ONHGSDy1U4Ft431DfQp15 1969-12-31 16:00:03.944 1969-12-31 15:59:55.451 NULL true
+-64 -200 NULL 927647669 -64.0 -200.0 NULL DhxkBT 1969-12-31 16:00:00.199 1969-12-31 15:59:55.451 NULL false
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 1090418478 -64.0 -7196.0 NULL 3E06w2 1969-12-31 16:00:00.29 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 1805860756 -64.0 -7196.0 NULL 4aOn4s2ATygu0476eD 1969-12-31 16:00:12.339 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+PREHOOK: query: select * from small_alltypesorc3b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc3b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc3b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc3b
+#### A masked pattern was here ####
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -609074876 -1887561756 NULL NULL EcM71 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:55.061 true false
+NULL NULL -700300206 -1887561756 NULL NULL kdqQE010 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:58.384 false false
+NULL NULL -726473298 1864027286 NULL NULL OFy1a1xf37f75b5N 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:11.799 true true
+NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:11.55 true false
+NULL NULL -838810013 1864027286 NULL NULL N016jPED08o 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:44.252 false true
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: select * from small_alltypesorc4b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc4b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc4b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc4b
+#### A masked pattern was here ####
+PREHOOK: query: create table small_alltypesorc_b stored as orc as select * from
+(select * from (select * from small_alltypesorc1b) sq1
+ union all
+ select * from (select * from small_alltypesorc2b) sq2
+ union all
+ select * from (select * from small_alltypesorc3b) sq3
+ union all
+ select * from (select * from small_alltypesorc4b) sq4) q
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@small_alltypesorc1b
+PREHOOK: Input: default@small_alltypesorc2b
+PREHOOK: Input: default@small_alltypesorc3b
+PREHOOK: Input: default@small_alltypesorc4b
+PREHOOK: Output: database:default
+PREHOOK: Output: default@small_alltypesorc_b
+POSTHOOK: query: create table small_alltypesorc_b stored as orc as select * from
+(select * from (select * from small_alltypesorc1b) sq1
+ union all
+ select * from (select * from small_alltypesorc2b) sq2
+ union all
+ select * from (select * from small_alltypesorc3b) sq3
+ union all
+ select * from (select * from small_alltypesorc4b) sq4) q
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@small_alltypesorc1b
+POSTHOOK: Input: default@small_alltypesorc2b
+POSTHOOK: Input: default@small_alltypesorc3b
+POSTHOOK: Input: default@small_alltypesorc4b
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@small_alltypesorc_b
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+PREHOOK: Output: default@small_alltypesorc_b
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+POSTHOOK: Output: default@small_alltypesorc_b
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+PREHOOK: query: select * from small_alltypesorc_b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from small_alltypesorc_b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -200 NULL -1809444706 -64.0 -200.0 NULL B87YVb3UASqg 1969-12-31 16:00:10.858 1969-12-31 15:59:55.451 NULL true
+-64 -200 NULL 2118653994 -64.0 -200.0 NULL ONHGSDy1U4Ft431DfQp15 1969-12-31 16:00:03.944 1969-12-31 15:59:55.451 NULL true
+-64 -200 NULL 927647669 -64.0 -200.0 NULL DhxkBT 1969-12-31 16:00:00.199 1969-12-31 15:59:55.451 NULL false
+-64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 1090418478 -64.0 -7196.0 NULL 3E06w2 1969-12-31 16:00:00.29 1969-12-31 15:59:58.174 NULL true
+-64 -7196 NULL 1805860756 -64.0 -7196.0 NULL 4aOn4s2ATygu0476eD 1969-12-31 16:00:12.339 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -609074876 -1887561756 NULL NULL EcM71 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:55.061 true false
+NULL NULL -700300206 -1887561756 NULL NULL kdqQE010 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:58.384 false false
+NULL NULL -726473298 1864027286 NULL NULL OFy1a1xf37f75b5N 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:11.799 true true
+NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:11.55 true false
+NULL NULL -838810013 1864027286 NULL NULL N016jPED08o 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:44.252 false true
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select *
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select *
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col2 (type: int)
+ 1 _col2 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col2 (type: int)
+ 1 _col2 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select *
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select *
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL -64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL -64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL -64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL -64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL -64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL -64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL
+-64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL -64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -200 NULL -1809444706 -64.0 -200.0 NULL B87YVb3UASqg 1969-12-31 16:00:10.858 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -200 NULL 2118653994 -64.0 -200.0 NULL ONHGSDy1U4Ft431DfQp15 1969-12-31 16:00:03.944 1969-12-31 15:59:55.451 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -200 NULL 927647669 -64.0 -200.0 NULL DhxkBT 1969-12-31 16:00:00.199 1969-12-31 15:59:55.451 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL -64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL
+-64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL -64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL -64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL -64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL -64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL -64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL -64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL -64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL -64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL -64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL -64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL -64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL -64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL
+-64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL -64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL -64 -10462 626923679 NULL -64.0 -10462.0 821UdmGbkEf4j NULL 1969-12-31 16:00:02.496 1969-12-31 16:00:00.164 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL -64 -3586 626923679 NULL -64.0 -3586.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.952 1969-12-31 15:59:51.131 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL -64 -4018 626923679 NULL -64.0 -4018.0 821UdmGbkEf4j NULL 1969-12-31 15:59:58.959 1969-12-31 16:00:07.803 true NULL
+-64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL -64 -4803 626923679 NULL -64.0 -4803.0 821UdmGbkEf4j NULL 1969-12-31 16:00:04.662 1969-12-31 16:00:01.609 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL -64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL -64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL -64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 1090418478 -64.0 -7196.0 NULL 3E06w2 1969-12-31 16:00:00.29 1969-12-31 15:59:58.174 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 1805860756 -64.0 -7196.0 NULL 4aOn4s2ATygu0476eD 1969-12-31 16:00:12.339 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL -64 -15920 528534767 NULL -64.0 -15920.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:51.859 1969-12-31 16:00:14.468 true NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL -64 -4040 528534767 NULL -64.0 -4040.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.733 1969-12-31 15:59:46.044 true NULL
+-64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL -64 -8080 528534767 NULL -64.0 -8080.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.044 1969-12-31 15:59:48.655 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL -64 -3097 253665376 NULL -64.0 -3097.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.013 1969-12-31 16:00:06.097 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL -64 -6907 253665376 NULL -64.0 -6907.0 1cGVWH7n1QU NULL NULL 1969-12-31 15:59:53.66 true NULL
+-64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL -64 -9842 253665376 NULL -64.0 -9842.0 1cGVWH7n1QU NULL 1969-12-31 16:00:00.631 1969-12-31 16:00:01.781 true NULL
+NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
+NULL NULL -609074876 -1887561756 NULL NULL EcM71 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:55.061 true false NULL NULL -609074876 -1887561756 NULL NULL EcM71 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:55.061 true false
+NULL NULL -700300206 -1887561756 NULL NULL kdqQE010 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:58.384 false false NULL NULL -700300206 -1887561756 NULL NULL kdqQE010 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:58.384 false false
+NULL NULL -726473298 1864027286 NULL NULL OFy1a1xf37f75b5N 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:11.799 true true NULL NULL -726473298 1864027286 NULL NULL OFy1a1xf37f75b5N 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:11.799 true true
+NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:11.55 true false NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:11.55 true false
+NULL NULL -838810013 1864027286 NULL NULL N016jPED08o 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:44.252 false true NULL NULL -838810013 1864027286 NULL NULL N016jPED08o 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:44.252 false true
+NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
+NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
+NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
+NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
+PREHOOK: query: explain
+select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+-64
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain
+select count(*) from (select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col1 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+ Edges:
+ Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cint (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col1 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: tinyint)
+ 1 _col0 (type: tinyint)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 36 Data size: 5199 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select count(*) from (select c.ctinyint
+from small_alltypesorc_b c
+left outer join small_alltypesorc_b cd
+ on cd.cint = c.cint
+left outer join small_alltypesorc_b hd
+ on hd.ctinyint = c.ctinyint
+) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@small_alltypesorc_b
+#### A masked pattern was here ####
+890
[19/23] hive git commit: HIVE-12390: Merge branch 'master' into spark
Posted by xu...@apache.org.
HIVE-12390: Merge branch 'master' into spark
Conflicts:
ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cad0ea6a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cad0ea6a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cad0ea6a
Branch: refs/heads/master
Commit: cad0ea6a98c71aa505e74e37b2558c50d13ba0f2
Parents: b02cd4a 206974a
Author: xzhang <xz...@xzdt>
Authored: Wed Nov 11 19:57:30 2015 -0800
Committer: xzhang <xz...@xzdt>
Committed: Wed Nov 11 19:57:30 2015 -0800
----------------------------------------------------------------------
accumulo-handler/pom.xml | 4 -
.../hadoop/hive/accumulo/LazyAccumuloRow.java | 5 +-
.../org/apache/hadoop/hive/accumulo/Utils.java | 5 +-
.../hive/accumulo/columns/ColumnMapper.java | 5 +-
.../accumulo/columns/ColumnMappingFactory.java | 5 +-
.../columns/HiveAccumuloColumnMapping.java | 5 +-
.../hive/accumulo/mr/HiveAccumuloSplit.java | 5 +-
.../predicate/AccumuloPredicateHandler.java | 5 +-
.../predicate/PrimitiveComparisonFilter.java | 5 +-
.../hive/accumulo/predicate/PushdownTuple.java | 5 +-
.../predicate/compare/StringCompare.java | 3 -
.../accumulo/serde/AccumuloRowSerializer.java | 5 +-
.../accumulo/serde/AccumuloSerDeParameters.java | 5 +-
.../serde/CompositeAccumuloRowIdFactory.java | 5 +-
.../predicate/TestAccumuloPredicateHandler.java | 3 -
.../serde/DelimitedAccumuloRowIdFactory.java | 5 +-
.../serde/FirstCharAccumuloCompositeRowId.java | 5 +-
.../hive/accumulo/serde/TestAccumuloSerDe.java | 3 -
beeline/pom.xml | 5 -
.../java/org/apache/hive/beeline/BeeLine.java | 22 +-
.../apache/hive/beeline/ClassNameCompleter.java | 6 +-
.../apache/hive/beeline/HiveSchemaHelper.java | 2 +-
.../org/apache/hive/beeline/SQLCompleter.java | 6 +-
.../apache/hive/beeline/util/QFileClient.java | 8 +-
beeline/src/main/resources/beeline-log4j2.xml | 4 +-
.../hive/beeline/TestBeelineArgParsing.java | 24 +-
.../apache/hive/beeline/cli/TestHiveCli.java | 6 +-
cli/pom.xml | 5 -
.../org/apache/hadoop/hive/cli/CliDriver.java | 18 +-
.../hadoop/hive/cli/OptionsProcessor.java | 6 +-
common/pom.xml | 10 -
.../hadoop/hive/common/CallableWithNdc.java | 44 -
.../hadoop/hive/common/CompressionUtils.java | 22 +-
.../apache/hadoop/hive/common/FileUtils.java | 8 +-
.../hadoop/hive/common/JvmPauseMonitor.java | 11 +-
.../org/apache/hadoop/hive/common/LogUtils.java | 6 +-
.../hadoop/hive/common/RunnableWithNdc.java | 43 -
.../apache/hadoop/hive/common/ServerUtils.java | 6 +-
.../common/io/NonSyncByteArrayOutputStream.java | 4 +
.../common/jsonexplain/tez/TezJsonParser.java | 8 +-
.../hive/common/jsonexplain/tez/Vertex.java | 9 +-
.../metrics/metrics2/CodahaleMetrics.java | 17 +-
.../org/apache/hadoop/hive/conf/HiveConf.java | 88 +-
.../hadoop/hive/conf/SystemVariables.java | 6 +-
.../hadoop/hive/conf/VariableSubstitution.java | 8 +-
.../apache/hadoop/hive/ql/log/PerfLogger.java | 8 +-
.../java/org/apache/hive/common/HiveCompat.java | 6 +-
.../hive/common/util/FixedSizedObjectPool.java | 6 +-
.../apache/hive/common/util/HashCodeUtil.java | 132 +
.../hive/common/util/HiveStringUtils.java | 2 +-
.../apache/hive/common/util/HiveTestUtils.java | 8 +-
.../hive/common/util/HiveVersionInfo.java | 6 +-
.../hive/common/util/ShutdownHookManager.java | 6 +-
.../common/util/TestFixedSizedObjectPool.java | 9 +-
contrib/pom.xml | 5 -
.../genericudf/example/GenericUDFDBOutput.java | 8 +-
.../hive/contrib/serde2/MultiDelimitSerDe.java | 5 +-
.../hadoop/hive/contrib/serde2/RegexSerDe.java | 6 +-
.../hive/contrib/serde2/TypedBytesSerDe.java | 6 +-
.../contrib/serde2/s3/S3LogDeserializer.java | 6 +-
errata.txt | 2 +-
hbase-handler/pom.xml | 5 -
.../AbstractHBaseKeyPredicateDecomposer.java | 8 +-
.../hive/hbase/CompositeHBaseKeyFactory.java | 8 +-
.../hive/hbase/HBaseLazyObjectFactory.java | 28 +
.../apache/hadoop/hive/hbase/HBaseSerDe.java | 11 +-
.../hadoop/hive/hbase/HBaseSerDeHelper.java | 8 +-
.../hadoop/hive/hbase/HBaseStorageHandler.java | 6 +-
.../HBaseTableSnapshotInputFormatUtil.java | 6 +-
.../hive/hbase/HiveHBaseTableInputFormat.java | 6 +-
.../hive/hbase/HiveHBaseTableOutputFormat.java | 6 +-
.../hive/hbase/HiveHFileOutputFormat.java | 6 +-
.../src/test/queries/positive/hbase_queries.q | 4 +-
.../results/positive/external_table_ppd.q.out | 16 +-
.../positive/hbase_binary_storage_queries.q.out | 32 +-
.../test/results/positive/hbase_queries.q.out | 37 +-
.../test/results/positive/hbase_timestamp.q.out | 6 +-
.../positive/hbase_timestamp_format.q.out | 12 +-
.../org/apache/hive/hcatalog/cli/HCatCli.java | 8 +-
.../mapreduce/HCatBaseOutputFormat.java | 2 -
.../hive/hcatalog/mapreduce/SpecialCases.java | 16 +-
.../hive/hcatalog/cli/TestSemanticAnalysis.java | 1 +
.../listener/DbNotificationListener.java | 6 +-
.../messaging/json/JSONMessageFactory.java | 6 +-
.../streaming/AbstractRecordWriter.java | 6 +-
.../hcatalog/streaming/ConnectionError.java | 4 +
.../streaming/DelimitedInputWriter.java | 6 +-
.../hive/hcatalog/streaming/HiveEndPoint.java | 57 +-
.../hive/hcatalog/streaming/InvalidTable.java | 4 +-
.../streaming/StreamingIntegrationTester.java | 6 +-
.../hive/hcatalog/streaming/TestStreaming.java | 35 +-
.../hive/hcatalog/api/TestHCatClient.java | 2 +-
.../hcatalog/api/repl/CommandTestUtils.java | 6 +-
.../api/repl/commands/TestCommands.java | 6 +-
.../hive/hcatalog/templeton/AppConfig.java | 6 +-
.../templeton/CatchallExceptionMapper.java | 6 +-
.../hcatalog/templeton/CompleteDelegator.java | 6 +-
.../hcatalog/templeton/DeleteDelegator.java | 6 +-
.../hcatalog/templeton/ExecServiceImpl.java | 6 +-
.../hive/hcatalog/templeton/HcatDelegator.java | 6 +-
.../hcatalog/templeton/LauncherDelegator.java | 6 +-
.../apache/hive/hcatalog/templeton/Main.java | 10 +-
.../hive/hcatalog/templeton/PigDelegator.java | 6 +-
.../hcatalog/templeton/ProxyUserSupport.java | 6 +-
.../hcatalog/templeton/SecureProxySupport.java | 6 +-
.../apache/hive/hcatalog/templeton/Server.java | 6 +-
.../hive/hcatalog/templeton/SqoopDelegator.java | 6 +-
.../hcatalog/templeton/StatusDelegator.java | 6 +-
.../hcatalog/templeton/tool/HDFSCleanup.java | 6 +-
.../hcatalog/templeton/tool/HDFSStorage.java | 6 +-
.../hive/hcatalog/templeton/tool/JobState.java | 6 +-
.../templeton/tool/JobStateTracker.java | 6 +-
.../hcatalog/templeton/tool/LaunchMapper.java | 8 +-
.../hcatalog/templeton/tool/LogRetriever.java | 6 +-
.../templeton/tool/TempletonControllerJob.java | 6 +-
.../hcatalog/templeton/tool/TempletonUtils.java | 6 +-
.../templeton/tool/TrivialExecService.java | 6 +-
.../templeton/tool/ZooKeeperCleanup.java | 6 +-
.../templeton/tool/ZooKeeperStorage.java | 6 +-
hplsql/pom.xml | 5 -
.../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 83 +-
.../main/java/org/apache/hive/hplsql/Cmp.java | 8 +-
.../main/java/org/apache/hive/hplsql/Conn.java | 25 +-
.../main/java/org/apache/hive/hplsql/Copy.java | 9 +-
.../main/java/org/apache/hive/hplsql/Exec.java | 236 +-
.../java/org/apache/hive/hplsql/Package.java | 194 +
.../main/java/org/apache/hive/hplsql/Scope.java | 15 +-
.../main/java/org/apache/hive/hplsql/Stmt.java | 2 +-
.../main/java/org/apache/hive/hplsql/Var.java | 56 +-
.../apache/hive/hplsql/functions/Function.java | 44 +-
.../hive/hplsql/functions/FunctionOra.java | 33 +-
hplsql/src/main/resources/hplsql-site.xml | 2 +-
.../org/apache/hive/hplsql/TestHplsqlLocal.java | 40 +
.../apache/hive/hplsql/TestHplsqlOffline.java | 5 +
hplsql/src/test/queries/local/bool.sql | 14 +
.../src/test/queries/local/create_package.sql | 60 +
.../src/test/queries/local/create_package2.sql | 23 +
hplsql/src/test/queries/local/datatypes.sql | 20 +
hplsql/src/test/queries/local/declare2.sql | 13 +
hplsql/src/test/queries/local/float.sql | 4 +
hplsql/src/test/queries/local/var_scope.sql | 28 +
hplsql/src/test/queries/local/var_scope2.sql | 30 +
.../test/queries/local/var_scope_include.sql | 1 +
.../src/test/queries/offline/insert_mysql.sql | 2 +
hplsql/src/test/results/local/bool.out.txt | 12 +
.../test/results/local/create_package.out.txt | 47 +
.../test/results/local/create_package2.out.txt | 16 +
hplsql/src/test/results/local/datatypes.out.txt | 27 +
hplsql/src/test/results/local/declare2.out.txt | 7 +
hplsql/src/test/results/local/float.out.txt | 6 +
hplsql/src/test/results/local/var_scope.out.txt | 26 +
.../src/test/results/local/var_scope2.out.txt | 26 +
.../test/results/offline/insert_mysql.out.txt | 4 +
hwi/pom.xml | 5 -
.../hadoop/hive/hwi/HWIContextListener.java | 6 +-
.../org/apache/hadoop/hive/hwi/HWIServer.java | 8 +-
.../apache/hadoop/hive/hwi/HWISessionItem.java | 8 +-
.../hadoop/hive/hwi/HWISessionManager.java | 6 +-
...CustomNonSettableStructObjectInspector1.java | 8 +-
.../api/TestHCatClientNotification.java | 6 +-
.../listener/TestDbNotificationListener.java | 8 +-
.../vectorization/AbstractExpression.java | 150 +
.../vectorization/VectorizationBench.java | 506 -
.../VectorizedArithmeticBench.java | 112 +
.../VectorizedComparisonBench.java | 215 +
.../vectorization/VectorizedLogicBench.java | 147 +
itests/hive-unit/pom.xml | 17 +-
.../hive/metastore/TestHiveMetaStore.java | 6 +-
.../hive/metastore/TestHiveMetaStoreTxns.java | 3 +-
...TestHiveMetaStoreWithEnvironmentContext.java | 11 +-
.../hive/metastore/TestMetastoreVersion.java | 6 +-
.../metastore/hbase/HBaseIntegrationTests.java | 6 +-
.../TestHBaseAggrStatsCacheIntegration.java | 6 +-
.../hive/metastore/hbase/TestHBaseImport.java | 6 +-
.../metastore/hbase/TestHBaseMetastoreSql.java | 6 +-
.../metastore/hbase/TestHBaseSchemaTool.java | 584 +
.../metastore/hbase/TestHBaseSchemaTool2.java | 61 +
.../hbase/TestHBaseStoreIntegration.java | 6 +-
.../hbase/TestStorageDescriptorSharing.java | 6 +-
.../TestHiveAuthorizerCheckInvocation.java | 6 +-
.../hadoop/hive/thrift/TestDBTokenStore.java | 2 +-
.../org/apache/hive/jdbc/TestJdbcDriver2.java | 309 +-
.../apache/hive/jdbc/TestServiceDiscovery.java | 178 +
.../hive/jdbc/cbo_rp_TestJdbcDriver2.java | 6 +-
itests/qtest-accumulo/pom.xml | 6 -
itests/qtest-spark/pom.xml | 5 -
itests/qtest/pom.xml | 11 +-
.../test/resources/testconfiguration.properties | 13 +
.../apache/hadoop/hive/serde2/TestSerDe.java | 6 +-
.../hive/udf/example/GenericUDFExampleAdd.java | 48 +
.../org/apache/hadoop/hive/ql/QTestUtil.java | 10 +-
.../hive/ql/hooks/CheckColumnAccessHook.java | 4 +-
...DummyHiveMetastoreAuthorizationProvider.java | 6 +-
...SQLStdHiveAuthorizationValidatorForTest.java | 47 +-
.../hadoop/hive/ql/udf/UDFFileLookup.java | 4 -
.../hive/ql/udf/generic/GenericUDAFSumList.java | 6 +-
jdbc/pom.xml | 19 +-
.../org/apache/hive/jdbc/HiveConnection.java | 123 +-
.../org/apache/hive/jdbc/HiveDataSource.java | 24 +-
.../apache/hive/jdbc/HiveDatabaseMetaData.java | 4 +-
.../java/org/apache/hive/jdbc/HiveDriver.java | 6 +
.../apache/hive/jdbc/HiveQueryResultSet.java | 6 +-
.../org/apache/hive/jdbc/HiveStatement.java | 21 +-
jdbc/src/java/org/apache/hive/jdbc/Utils.java | 8 +-
.../hive/jdbc/ZooKeeperHiveClientHelper.java | 14 +-
.../org/apache/hive/jdbc/HiveStatementTest.java | 31 +
llap-client/pom.xml | 11 +-
.../hive/llap/registry/ServiceInstance.java | 73 +
.../hive/llap/registry/ServiceInstanceSet.java | 57 +
.../hive/llap/registry/ServiceRegistry.java | 59 +
.../registry/impl/LlapFixedRegistryImpl.java | 223 +
.../llap/registry/impl/LlapRegistryService.java | 87 +
.../registry/impl/LlapYarnRegistryImpl.java | 383 +
llap-server/pom.xml | 11 -
.../hadoop/hive/llap/cache/BuddyAllocator.java | 91 +-
.../hive/llap/cache/LowLevelCacheImpl.java | 8 +-
.../llap/cache/LowLevelCacheMemoryManager.java | 12 +
.../llap/cache/LowLevelFifoCachePolicy.java | 3 -
.../hadoop/hive/llap/cache/MemoryManager.java | 1 +
.../hive/llap/cli/LlapOptionsProcessor.java | 6 +-
.../hadoop/hive/llap/cli/LlapServiceDriver.java | 6 +-
.../hadoop/hive/llap/daemon/HistoryLogger.java | 5 +-
.../hive/llap/daemon/impl/AMReporter.java | 6 +-
.../llap/daemon/impl/ContainerRunnerImpl.java | 2 +-
.../hive/llap/daemon/impl/LlapDaemon.java | 2 +-
.../impl/LlapDaemonProtocolServerImpl.java | 6 +-
.../llap/daemon/impl/TaskExecutorService.java | 21 +-
.../llap/daemon/impl/TaskRunnerCallable.java | 6 +-
.../llap/daemon/registry/ServiceInstance.java | 73 -
.../daemon/registry/ServiceInstanceSet.java | 57 -
.../llap/daemon/registry/ServiceRegistry.java | 59 -
.../registry/impl/LlapFixedRegistryImpl.java | 222 -
.../registry/impl/LlapRegistryService.java | 86 -
.../registry/impl/LlapYarnRegistryImpl.java | 383 -
.../daemon/services/impl/LlapWebServices.java | 2 -
.../hive/llap/io/api/impl/LlapInputFormat.java | 5 +-
.../hive/llap/io/api/impl/LlapIoImpl.java | 9 +-
.../llap/io/encoded/OrcEncodedDataReader.java | 14 +-
.../hive/llap/shufflehandler/DirWatcher.java | 6 +-
.../shufflehandler/FadvisedChunkedFile.java | 6 +-
.../llap/shufflehandler/FadvisedFileRegion.java | 6 +-
.../hive/llap/shufflehandler/IndexCache.java | 6 +-
.../llap/shufflehandler/ShuffleHandler.java | 19 +-
.../tezplugins/helpers/SourceStateTracker.java | 6 +-
.../dag/app/rm/LlapTaskSchedulerService.java | 12 +-
.../hive/llap/cache/TestBuddyAllocator.java | 12 +-
.../TestIncrementalObjectSizeEstimator.java | 6 +-
.../hive/llap/cache/TestLowLevelCacheImpl.java | 6 +-
.../llap/cache/TestLowLevelLrfuCachePolicy.java | 6 +-
.../hive/llap/cache/TestOrcMetadataCache.java | 8 +-
.../hive/llap/daemon/MiniLlapCluster.java | 6 +-
.../app/rm/TestLlapTaskSchedulerService.java | 2 +-
metastore/if/hive_metastore.thrift | 14 +
metastore/pom.xml | 13 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 3433 +++--
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 334 +
.../ThriftHiveMetastore_server.skeleton.cpp | 10 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 349 +-
.../gen/thrift/gen-cpp/hive_metastore_types.h | 65 +
.../hadoop/hive/metastore/api/TableMeta.java | 701 +
.../hive/metastore/api/ThriftHiveMetastore.java | 13687 ++++++++++-------
.../gen-php/metastore/ThriftHiveMetastore.php | 1915 ++-
.../src/gen/thrift/gen-php/metastore/Types.php | 144 +
.../hive_metastore/ThriftHiveMetastore-remote | 14 +
.../hive_metastore/ThriftHiveMetastore.py | 1353 +-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 110 +
.../gen/thrift/gen-rb/hive_metastore_types.rb | 25 +
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 149 +
.../hive/metastore/AggregateStatsCache.java | 34 +-
.../apache/hadoop/hive/metastore/Deadline.java | 6 +-
.../hadoop/hive/metastore/HiveAlterHandler.java | 12 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 56 +-
.../hive/metastore/HiveMetaStoreClient.java | 54 +-
.../hive/metastore/HiveMetaStoreFsImpl.java | 8 +-
.../hadoop/hive/metastore/IMetaStoreClient.java | 23 +
.../hive/metastore/MetaStoreDirectSql.java | 6 +-
.../hadoop/hive/metastore/MetaStoreInit.java | 6 +-
.../hadoop/hive/metastore/MetaStoreUtils.java | 43 +-
.../hadoop/hive/metastore/ObjectStore.java | 124 +-
.../hive/metastore/PartFilterExprUtil.java | 6 +-
.../apache/hadoop/hive/metastore/RawStore.java | 4 +
.../hadoop/hive/metastore/RawStoreProxy.java | 6 -
.../hive/metastore/RetryingHMSHandler.java | 6 +-
.../hive/metastore/RetryingMetaStoreClient.java | 6 +-
.../hive/metastore/TUGIBasedProcessor.java | 6 +-
.../apache/hadoop/hive/metastore/Warehouse.java | 6 +-
.../hive/metastore/events/EventCleanerTask.java | 8 +-
.../hbase/AggrStatsInvalidatorFilter.java | 8 +-
.../hive/metastore/hbase/HBaseImport.java | 6 +-
.../hive/metastore/hbase/HBaseReadWrite.java | 704 +-
.../hive/metastore/hbase/HBaseSchemaTool.java | 282 +-
.../hadoop/hive/metastore/hbase/HBaseStore.java | 50 +-
.../hadoop/hive/metastore/hbase/HBaseUtils.java | 109 +-
.../metastore/hbase/PartitionKeyComparator.java | 6 +-
.../hbase/SharedStorageDescriptor.java | 6 +-
.../hadoop/hive/metastore/hbase/StatsCache.java | 6 +-
.../metastore/hbase/TephraHBaseConnection.java | 6 +-
.../metastore/hbase/VanillaHBaseConnection.java | 6 +-
.../hadoop/hive/metastore/parser/Filter.g | 218 +
.../spec/CompositePartitionSpecProxy.java | 4 +-
.../hive/metastore/tools/HiveMetaTool.java | 6 +-
.../metastore/txn/CompactionTxnHandler.java | 6 +-
.../hadoop/hive/metastore/txn/TxnDbUtil.java | 6 +-
.../hadoop/hive/metastore/txn/TxnHandler.java | 6 +-
.../metastore/DummyMetaStoreInitListener.java | 4 +-
.../DummyRawStoreControlledCommit.java | 7 +
.../DummyRawStoreForJdoConnection.java | 7 +
.../hive/metastore/VerifyingObjectStore.java | 6 +-
.../hbase/TestHBaseAggregateStatsCache.java | 6 +-
.../hive/metastore/hbase/TestHBaseStore.java | 6 +-
.../metastore/hbase/TestHBaseStoreCached.java | 6 +-
.../hbase/TestSharedStorageDescriptor.java | 6 +-
.../metastore/txn/TestTxnHandlerNegative.java | 6 +-
pom.xml | 26 +-
ql/pom.xml | 10 -
.../org/apache/hadoop/hive/llap/LogLevels.java | 4 +-
.../java/org/apache/hadoop/hive/ql/Context.java | 10 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 93 +-
.../apache/hadoop/hive/ql/DriverContext.java | 8 +-
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 6 -
.../org/apache/hadoop/hive/ql/QueryPlan.java | 3 -
.../hive/ql/exec/AbstractFileMergeOperator.java | 9 +-
.../hadoop/hive/ql/exec/ArchiveUtils.java | 6 +-
.../hadoop/hive/ql/exec/AutoProgressor.java | 6 +-
.../hadoop/hive/ql/exec/ColumnStatsTask.java | 6 +-
.../hive/ql/exec/ColumnStatsUpdateTask.java | 10 +-
.../hadoop/hive/ql/exec/CommonJoinOperator.java | 6 +-
.../hive/ql/exec/CommonMergeJoinOperator.java | 14 +-
.../apache/hadoop/hive/ql/exec/CopyTask.java | 6 +-
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 52 +-
.../hive/ql/exec/DefaultBucketMatcher.java | 6 +-
.../hadoop/hive/ql/exec/DemuxOperator.java | 6 +-
.../apache/hadoop/hive/ql/exec/ExplainTask.java | 8 +-
.../ql/exec/ExprNodeGenericFuncEvaluator.java | 8 +-
.../hadoop/hive/ql/exec/FetchOperator.java | 7 +-
.../apache/hadoop/hive/ql/exec/FetchTask.java | 6 +-
.../hadoop/hive/ql/exec/FileSinkOperator.java | 9 +-
.../hadoop/hive/ql/exec/FunctionRegistry.java | 133 +-
.../hadoop/hive/ql/exec/FunctionTask.java | 10 +-
.../hive/ql/exec/HashTableSinkOperator.java | 8 +-
.../apache/hadoop/hive/ql/exec/Heartbeater.java | 6 +-
.../hive/ql/exec/HiveTotalOrderPartitioner.java | 6 +-
.../hadoop/hive/ql/exec/JoinOperator.java | 6 +-
.../hadoop/hive/ql/exec/KeyWrapperFactory.java | 20 +-
.../hadoop/hive/ql/exec/MapJoinOperator.java | 26 +-
.../apache/hadoop/hive/ql/exec/MapOperator.java | 27 +
.../hadoop/hive/ql/exec/MapredContext.java | 10 +-
.../apache/hadoop/hive/ql/exec/MoveTask.java | 9 +-
.../apache/hadoop/hive/ql/exec/MuxOperator.java | 6 +-
.../hadoop/hive/ql/exec/ObjectCacheFactory.java | 6 +-
.../apache/hadoop/hive/ql/exec/Operator.java | 58 +-
.../hadoop/hive/ql/exec/OperatorFactory.java | 6 +-
.../hadoop/hive/ql/exec/OperatorUtils.java | 17 +-
.../hive/ql/exec/OrcFileMergeOperator.java | 6 +-
.../hadoop/hive/ql/exec/PTFPartition.java | 6 +-
.../hive/ql/exec/PartitionKeySampler.java | 6 +-
.../hive/ql/exec/RCFileMergeOperator.java | 6 +-
.../hadoop/hive/ql/exec/ReduceSinkOperator.java | 10 +
.../apache/hadoop/hive/ql/exec/Registry.java | 18 +-
.../hadoop/hive/ql/exec/SMBMapJoinOperator.java | 8 +-
.../hadoop/hive/ql/exec/SkewJoinHandler.java | 8 +-
.../ql/exec/SparkHashTableSinkOperator.java | 6 +-
.../hadoop/hive/ql/exec/StatsNoJobTask.java | 6 +-
.../apache/hadoop/hive/ql/exec/StatsTask.java | 6 +-
.../org/apache/hadoop/hive/ql/exec/Task.java | 6 +-
.../hive/ql/exec/TezDummyStoreOperator.java | 9 +
.../apache/hadoop/hive/ql/exec/TopNHash.java | 13 +-
.../hadoop/hive/ql/exec/UDTFOperator.java | 6 +-
.../apache/hadoop/hive/ql/exec/Utilities.java | 26 +-
.../hive/ql/exec/errors/TaskLogProcessor.java | 6 +-
.../mapjoin/MapJoinMemoryExhaustionHandler.java | 6 +-
.../hadoop/hive/ql/exec/mr/ExecDriver.java | 26 +-
.../hadoop/hive/ql/exec/mr/ExecMapper.java | 18 +-
.../hive/ql/exec/mr/ExecMapperContext.java | 3 -
.../hadoop/hive/ql/exec/mr/ExecReducer.java | 8 +-
.../hadoop/hive/ql/exec/mr/HashTableLoader.java | 6 +-
.../hadoop/hive/ql/exec/mr/MapredLocalTask.java | 8 +-
.../hadoop/hive/ql/exec/mr/ObjectCache.java | 16 +-
.../apache/hadoop/hive/ql/exec/mr/Throttle.java | 4 +-
.../persistence/BytesBytesMultiHashMap.java | 6 +-
.../ql/exec/persistence/FlatRowContainer.java | 6 +-
.../ql/exec/persistence/HashMapWrapper.java | 6 +-
.../persistence/HybridHashTableContainer.java | 69 +-
.../ql/exec/persistence/KeyValueContainer.java | 6 +-
.../persistence/MapJoinBytesTableContainer.java | 11 +-
.../hive/ql/exec/persistence/MapJoinKey.java | 4 +-
.../ql/exec/persistence/MapJoinKeyObject.java | 6 +-
.../ql/exec/persistence/ObjectContainer.java | 6 +-
.../hive/ql/exec/persistence/RowContainer.java | 6 +-
.../hive/ql/exec/spark/HashTableLoader.java | 6 +-
.../hive/ql/exec/spark/HiveKVResultCache.java | 6 +-
.../ql/exec/spark/HiveSparkClientFactory.java | 6 +-
.../hive/ql/exec/spark/KryoSerializer.java | 6 +-
.../ql/exec/spark/LocalHiveSparkClient.java | 16 +-
.../ql/exec/spark/RemoteHiveSparkClient.java | 6 +-
.../hive/ql/exec/spark/SmallTableCache.java | 6 +-
.../exec/spark/SparkDynamicPartitionPruner.java | 6 +-
.../ql/exec/spark/SparkMapRecordHandler.java | 9 +-
.../exec/spark/SparkMergeFileRecordHandler.java | 6 +-
.../hadoop/hive/ql/exec/spark/SparkPlan.java | 8 +-
.../hive/ql/exec/spark/SparkPlanGenerator.java | 7 +-
.../hive/ql/exec/spark/SparkRecordHandler.java | 10 +-
.../ql/exec/spark/SparkReduceRecordHandler.java | 8 +-
.../hadoop/hive/ql/exec/spark/SparkTask.java | 8 +-
.../ql/exec/spark/session/SparkSessionImpl.java | 6 +-
.../spark/session/SparkSessionManagerImpl.java | 6 +-
.../ql/exec/spark/status/SparkJobMonitor.java | 8 +-
.../spark/status/impl/JobMetricsListener.java | 7 +-
.../spark/status/impl/LocalSparkJobStatus.java | 6 +-
.../spark/status/impl/RemoteSparkJobStatus.java | 6 +-
.../ql/exec/tez/ColumnarSplitSizeEstimator.java | 6 +-
.../hive/ql/exec/tez/CustomPartitionEdge.java | 6 +-
.../hive/ql/exec/tez/CustomPartitionVertex.java | 7 +-
.../hadoop/hive/ql/exec/tez/DagUtils.java | 6 +-
.../ql/exec/tez/DynamicPartitionPruner.java | 6 +-
.../hive/ql/exec/tez/HashTableLoader.java | 6 +-
.../hive/ql/exec/tez/HivePreWarmProcessor.java | 6 +-
.../hive/ql/exec/tez/HiveSplitGenerator.java | 6 +-
.../hive/ql/exec/tez/LlapObjectCache.java | 24 +-
.../hive/ql/exec/tez/MapRecordProcessor.java | 33 +-
.../hive/ql/exec/tez/MapRecordSource.java | 8 +-
.../ql/exec/tez/MergeFileRecordProcessor.java | 10 +-
.../hadoop/hive/ql/exec/tez/ObjectCache.java | 6 +-
.../hive/ql/exec/tez/RecordProcessor.java | 7 +-
.../hive/ql/exec/tez/ReduceRecordProcessor.java | 6 +-
.../hive/ql/exec/tez/ReduceRecordSource.java | 26 +-
.../hadoop/hive/ql/exec/tez/SplitGrouper.java | 6 +-
.../hive/ql/exec/tez/TezJobExecHelper.java | 6 +-
.../hadoop/hive/ql/exec/tez/TezProcessor.java | 17 +-
.../hive/ql/exec/tez/TezSessionPoolManager.java | 6 +-
.../hive/ql/exec/tez/TezSessionState.java | 10 +-
.../apache/hadoop/hive/ql/exec/tez/TezTask.java | 9 +
.../ql/exec/tez/tools/KeyValueInputMerger.java | 6 +-
.../ql/exec/tez/tools/KeyValuesInputMerger.java | 6 +-
.../hive/ql/exec/vector/VectorAssignRow.java | 8 +-
.../ql/exec/vector/VectorColumnOrderedMap.java | 6 +-
.../hive/ql/exec/vector/VectorCopyRow.java | 8 +-
.../ql/exec/vector/VectorDeserializeRow.java | 68 +-
.../exec/vector/VectorExpressionDescriptor.java | 6 +-
.../hive/ql/exec/vector/VectorExtractRow.java | 8 +-
.../ql/exec/vector/VectorGroupByOperator.java | 8 +-
.../exec/vector/VectorMapJoinBaseOperator.java | 8 +-
.../ql/exec/vector/VectorMapJoinOperator.java | 6 +-
.../exec/vector/VectorSMBMapJoinOperator.java | 6 +-
.../hive/ql/exec/vector/VectorSerializeRow.java | 68 +-
.../exec/vector/VectorSerializeRowNoNulls.java | 412 -
.../ql/exec/vector/VectorizationContext.java | 24 +-
.../ql/exec/vector/VectorizedBatchUtil.java | 6 +-
.../ql/exec/vector/VectorizedRowBatchCtx.java | 6 +-
.../expressions/FilterStructColumnInList.java | 3 +-
.../ql/exec/vector/expressions/NullUtil.java | 21 +-
.../vector/expressions/StructColumnInList.java | 3 +-
.../vector/expressions/VectorUDFDateString.java | 10 +-
.../exec/vector/keyseries/VectorKeySeries.java | 98 +
.../VectorKeySeriesBytesSerialized.java | 271 +
.../vector/keyseries/VectorKeySeriesImpl.java | 68 +
.../VectorKeySeriesLongSerialized.java | 249 +
.../VectorKeySeriesMultiSerialized.java | 187 +
.../keyseries/VectorKeySeriesSerialized.java | 35 +
.../VectorKeySeriesSerializedImpl.java | 130 +
.../keyseries/VectorKeySeriesSingleImpl.java | 158 +
.../mapjoin/VectorMapJoinCommonOperator.java | 55 +-
.../VectorMapJoinGenerateResultOperator.java | 37 +-
...pJoinInnerBigOnlyGenerateResultOperator.java | 8 +-
.../VectorMapJoinInnerBigOnlyLongOperator.java | 14 +-
...ctorMapJoinInnerBigOnlyMultiKeyOperator.java | 38 +-
...VectorMapJoinInnerBigOnlyStringOperator.java | 14 +-
...ectorMapJoinInnerGenerateResultOperator.java | 8 +-
.../mapjoin/VectorMapJoinInnerLongOperator.java | 14 +-
.../VectorMapJoinInnerMultiKeyOperator.java | 38 +-
.../VectorMapJoinInnerStringOperator.java | 14 +-
...orMapJoinLeftSemiGenerateResultOperator.java | 8 +-
.../VectorMapJoinLeftSemiLongOperator.java | 14 +-
.../VectorMapJoinLeftSemiMultiKeyOperator.java | 38 +-
.../VectorMapJoinLeftSemiStringOperator.java | 14 +-
...ectorMapJoinOuterGenerateResultOperator.java | 20 +-
.../mapjoin/VectorMapJoinOuterLongOperator.java | 16 +-
.../VectorMapJoinOuterMultiKeyOperator.java | 21 +-
.../VectorMapJoinOuterStringOperator.java | 16 +-
.../mapjoin/VectorMapJoinRowBytesContainer.java | 6 +-
.../fast/VectorMapJoinFastBytesHashMap.java | 11 +-
.../VectorMapJoinFastBytesHashMultiSet.java | 11 +-
.../fast/VectorMapJoinFastBytesHashSet.java | 11 +-
.../fast/VectorMapJoinFastBytesHashTable.java | 17 +-
.../fast/VectorMapJoinFastBytesHashUtil.java | 4 -
.../fast/VectorMapJoinFastHashTable.java | 8 +-
.../fast/VectorMapJoinFastHashTableLoader.java | 8 +-
.../fast/VectorMapJoinFastIntHashUtil.java | 32 -
.../mapjoin/fast/VectorMapJoinFastKeyStore.java | 8 +-
.../fast/VectorMapJoinFastLongHashMap.java | 11 +-
.../fast/VectorMapJoinFastLongHashMultiSet.java | 11 +-
.../fast/VectorMapJoinFastLongHashSet.java | 11 +-
.../fast/VectorMapJoinFastLongHashTable.java | 19 +-
.../fast/VectorMapJoinFastLongHashUtil.java | 11 -
.../fast/VectorMapJoinFastTableContainer.java | 8 +-
.../fast/VectorMapJoinFastValueStore.java | 8 +-
.../VectorMapJoinOptimizedCreateHashTable.java | 8 +-
.../VectorMapJoinOptimizedHashTable.java | 6 +-
.../VectorMapJoinOptimizedLongCommon.java | 8 +-
.../VectorReduceSinkCommonOperator.java | 416 +
.../VectorReduceSinkLongOperator.java | 72 +
.../VectorReduceSinkMultiKeyOperator.java | 68 +
.../VectorReduceSinkStringOperator.java | 70 +
.../ql/exec/vector/udf/VectorUDFAdaptor.java | 5 +
.../ql/exec/vector/udf/VectorUDFArgDesc.java | 19 +-
.../hadoop/hive/ql/history/HiveHistoryImpl.java | 8 +-
.../hive/ql/history/HiveHistoryViewer.java | 6 +-
.../apache/hadoop/hive/ql/hooks/ATSHook.java | 27 +-
.../hadoop/hive/ql/hooks/LineageLogger.java | 16 +-
.../hive/ql/hooks/PostExecOrcFileDump.java | 6 +-
.../ql/hooks/PostExecTezSummaryPrinter.java | 6 +-
.../hadoop/hive/ql/hooks/WriteEntity.java | 6 +-
.../apache/hadoop/hive/ql/index/HiveIndex.java | 6 +-
.../hive/ql/index/HiveIndexQueryContext.java | 10 +-
.../hadoop/hive/ql/index/HiveIndexResult.java | 8 +-
.../hive/ql/index/HiveIndexedInputFormat.java | 6 +-
.../ql/index/bitmap/BitmapIndexHandler.java | 6 +-
.../ql/index/compact/CompactIndexHandler.java | 6 +-
.../compact/HiveCompactIndexInputFormat.java | 8 +-
.../hadoop/hive/ql/io/AcidInputFormat.java | 14 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 6 +-
.../hive/ql/io/BucketizedHiveInputFormat.java | 8 +-
.../org/apache/hadoop/hive/ql/io/CodecPool.java | 6 +-
.../hive/ql/io/CombineHiveInputFormat.java | 6 +-
.../org/apache/hadoop/hive/ql/io/HdfsUtils.java | 6 +-
.../ql/io/HiveContextAwareRecordReader.java | 8 +-
.../hadoop/hive/ql/io/HiveFileFormatUtils.java | 6 +-
.../hadoop/hive/ql/io/HiveInputFormat.java | 10 +-
.../apache/hadoop/hive/ql/io/IOContextMap.java | 6 +-
.../hadoop/hive/ql/io/NullRowsInputFormat.java | 6 +-
.../org/apache/hadoop/hive/ql/io/RCFile.java | 6 +-
.../hadoop/hive/ql/io/StorageFormatFactory.java | 6 +-
.../ql/io/avro/AvroContainerOutputFormat.java | 6 +-
.../ql/io/avro/AvroGenericRecordReader.java | 6 +-
.../hive/ql/io/merge/MergeFileMapper.java | 6 +-
.../hadoop/hive/ql/io/merge/MergeFileTask.java | 2 +-
.../hadoop/hive/ql/io/merge/MergeFileWork.java | 6 +-
.../apache/hadoop/hive/ql/io/orc/InStream.java | 6 +-
.../hadoop/hive/ql/io/orc/MemoryManager.java | 6 +-
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 30 +-
.../hive/ql/io/orc/OrcNewInputFormat.java | 6 +-
.../hadoop/hive/ql/io/orc/OrcOutputFormat.java | 6 +-
.../hive/ql/io/orc/OrcRawRecordMerger.java | 6 +-
.../hadoop/hive/ql/io/orc/OrcRecordUpdater.java | 6 +-
.../apache/hadoop/hive/ql/io/orc/OrcSerde.java | 6 +-
.../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 6 +-
.../apache/hadoop/hive/ql/io/orc/OrcUtils.java | 6 +-
.../hadoop/hive/ql/io/orc/ReaderImpl.java | 8 +-
.../hive/ql/io/orc/RecordReaderFactory.java | 6 +-
.../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 11 +-
.../ql/io/orc/RunLengthIntegerReaderV2.java | 6 +-
.../hadoop/hive/ql/io/orc/WriterImpl.java | 6 +-
.../ql/io/orc/encoded/EncodedReaderImpl.java | 115 +-
.../hive/ql/io/parquet/LeafFilterFactory.java | 6 +-
.../ql/io/parquet/MapredParquetInputFormat.java | 6 +-
.../io/parquet/MapredParquetOutputFormat.java | 6 +-
.../hive/ql/io/parquet/ProjectionPusher.java | 6 +-
.../parquet/VectorizedParquetInputFormat.java | 8 +-
.../read/ParquetFilterPredicateConverter.java | 6 +-
.../read/ParquetRecordReaderWrapper.java | 6 +-
.../ql/io/parquet/write/DataWritableWriter.java | 8 +-
.../write/ParquetRecordWriterWrapper.java | 6 +-
.../ql/io/rcfile/stats/PartialScanMapper.java | 6 +-
.../ql/io/rcfile/stats/PartialScanTask.java | 7 +-
.../rcfile/truncate/ColumnTruncateMapper.java | 6 +-
.../io/rcfile/truncate/ColumnTruncateTask.java | 5 +-
.../hive/ql/io/sarg/ConvertAstToSearchArg.java | 6 +-
.../hadoop/hive/ql/lockmgr/DbLockManager.java | 6 +-
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 6 +-
.../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 8 +-
.../hive/ql/lockmgr/EmbeddedLockManager.java | 19 +-
.../zookeeper/CuratorFrameworkSingleton.java | 6 +-
.../zookeeper/ZooKeeperHiveLockManager.java | 16 +-
.../hadoop/hive/ql/metadata/DummyPartition.java | 8 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 83 +-
.../hive/ql/metadata/HiveMetaStoreChecker.java | 6 +-
.../hadoop/hive/ql/metadata/HiveUtils.java | 6 +-
.../hadoop/hive/ql/metadata/Partition.java | 23 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 84 +-
.../apache/hadoop/hive/ql/metadata/Table.java | 44 +-
.../formatting/JsonMetaDataFormatter.java | 6 +-
.../formatting/TextMetaDataFormatter.java | 6 +-
.../ql/optimizer/AbstractBucketJoinProc.java | 4 -
...tionSizeBasedBigTableSelectorForAutoSMJ.java | 8 +-
.../hive/ql/optimizer/BucketJoinProcCtx.java | 8 +-
.../ql/optimizer/BucketMapJoinOptimizer.java | 6 +-
.../BucketingSortingReduceSinkOptimizer.java | 6 +
.../hive/ql/optimizer/ColumnPrunerProcCtx.java | 7 +
.../ql/optimizer/ColumnPrunerProcFactory.java | 6 +-
.../hive/ql/optimizer/ConstantPropagate.java | 6 +-
.../ql/optimizer/ConstantPropagateProcCtx.java | 21 +-
.../optimizer/ConstantPropagateProcFactory.java | 6 +-
.../hive/ql/optimizer/ConvertJoinMapJoin.java | 6 +-
.../DynamicPartitionPruningOptimization.java | 6 +-
.../hive/ql/optimizer/GenMRFileSink1.java | 6 +-
.../hive/ql/optimizer/GenMapRedUtils.java | 17 +-
.../hive/ql/optimizer/GlobalLimitOptimizer.java | 6 +-
.../hive/ql/optimizer/GroupByOptimizer.java | 14 +-
.../ql/optimizer/IdentityProjectRemover.java | 6 +-
.../hadoop/hive/ql/optimizer/IndexUtils.java | 6 +-
.../hive/ql/optimizer/MapJoinProcessor.java | 6 +-
.../ql/optimizer/OperatorComparatorFactory.java | 8 +-
.../hadoop/hive/ql/optimizer/Optimizer.java | 29 +-
.../ql/optimizer/PartitionColumnsSeparator.java | 525 +
.../hive/ql/optimizer/PointLookupOptimizer.java | 96 +-
.../hadoop/hive/ql/optimizer/PrunerUtils.java | 8 -
.../ql/optimizer/ReduceSinkMapJoinProc.java | 6 +-
.../optimizer/RemoveDynamicPruningBySize.java | 6 +-
.../hadoop/hive/ql/optimizer/SamplePruner.java | 8 +-
.../ql/optimizer/SetReducerParallelism.java | 6 +-
.../hive/ql/optimizer/SimpleFetchOptimizer.java | 6 +-
.../hive/ql/optimizer/SkewJoinOptimizer.java | 6 +-
.../optimizer/SortedDynPartitionOptimizer.java | 6 +-
.../SortedMergeBucketMapJoinOptimizer.java | 8 +-
.../SparkRemoveDynamicPruningBySize.java | 8 +-
.../hive/ql/optimizer/StatsOptimizer.java | 50 +-
.../ql/optimizer/calcite/HiveCalciteUtil.java | 15 +-
.../ql/optimizer/calcite/HiveRelOptUtil.java | 6 +-
.../ql/optimizer/calcite/RelOptHiveTable.java | 8 +-
.../optimizer/calcite/cost/HiveCostModel.java | 6 +-
.../calcite/cost/HiveOnTezCostModel.java | 6 +-
.../calcite/reloperators/HiveTableScan.java | 6 +-
.../rules/HiveInsertExchange4JoinRule.java | 8 +-
.../calcite/rules/HiveJoinAddNotNullRule.java | 8 +-
.../calcite/rules/HiveJoinToMultiJoinRule.java | 6 +-
.../calcite/rules/HivePreFilteringRule.java | 10 +-
.../calcite/rules/HiveRelFieldTrimmer.java | 143 +-
.../optimizer/calcite/rules/PartitionPrune.java | 6 +-
.../calcite/stats/HiveRelMdRowCount.java | 20 +-
.../calcite/translator/ASTConverter.java | 40 +-
.../calcite/translator/ExprNodeConverter.java | 12 +-
.../calcite/translator/HiveOpConverter.java | 13 +-
.../translator/HiveOpConverterPostProc.java | 6 +-
.../translator/PlanModifierForASTConv.java | 9 +-
.../calcite/translator/PlanModifierUtil.java | 6 +-
.../calcite/translator/RexNodeConverter.java | 19 +-
.../translator/SqlFunctionConverter.java | 6 +-
.../correlation/CorrelationOptimizer.java | 6 +-
.../QueryPlanTreeTransformation.java | 6 +-
.../ql/optimizer/index/RewriteCanApplyCtx.java | 6 +-
.../ql/optimizer/index/RewriteGBUsingIndex.java | 6 +-
.../index/RewriteParseContextGenerator.java | 6 +-
.../RewriteQueryUsingAggregateIndexCtx.java | 6 +-
.../ql/optimizer/lineage/OpProcFactory.java | 25 +-
.../LBPartitionProcFactory.java | 6 +-
.../ListBucketingPruner.java | 6 +-
.../pcr/PartitionConditionRemover.java | 8 +-
.../ql/optimizer/pcr/PcrExprProcFactory.java | 39 +-
.../hive/ql/optimizer/pcr/PcrOpProcFactory.java | 8 +-
.../optimizer/physical/CrossProductCheck.java | 8 +-
.../physical/GenSparkSkewJoinProcessor.java | 6 +-
.../hive/ql/optimizer/physical/LlapDecider.java | 8 +-
.../physical/LocalMapJoinProcFactory.java | 6 +-
.../ql/optimizer/physical/MemoryDecider.java | 6 +-
.../physical/MetadataOnlyOptimizer.java | 6 +-
.../optimizer/physical/NullScanOptimizer.java | 6 +-
.../physical/NullScanTaskDispatcher.java | 12 +-
.../ql/optimizer/physical/SerializeFilter.java | 6 +-
.../hive/ql/optimizer/physical/Vectorizer.java | 235 +-
.../physical/index/IndexWhereProcCtx.java | 6 +-
.../physical/index/IndexWhereProcessor.java | 6 +-
.../hive/ql/optimizer/ppr/OpProcFactory.java | 3 +-
.../ppr/PartitionExpressionForMetastore.java | 6 +-
.../hive/ql/optimizer/ppr/PartitionPruner.java | 6 +-
.../spark/CombineEquivalentWorkResolver.java | 6 +-
.../spark/SetSparkReducerParallelism.java | 6 +-
.../optimizer/spark/SparkMapJoinOptimizer.java | 6 +-
.../spark/SparkReduceSinkMapJoinProc.java | 6 +-
.../stats/annotation/StatsRulesProcFactory.java | 10 +-
.../ql/optimizer/unionproc/UnionProcessor.java | 5 -
.../hive/ql/parse/AppMasterEventProcessor.java | 6 +-
.../hive/ql/parse/BaseSemanticAnalyzer.java | 13 +-
.../hadoop/hive/ql/parse/CalcitePlanner.java | 8 +-
.../hive/ql/parse/ColumnAccessAnalyzer.java | 6 +-
.../ql/parse/ColumnStatsSemanticAnalyzer.java | 8 +-
.../hive/ql/parse/DDLSemanticAnalyzer.java | 11 +-
.../apache/hadoop/hive/ql/parse/EximUtil.java | 6 +-
.../hadoop/hive/ql/parse/FileSinkProcessor.java | 8 +-
.../hive/ql/parse/FunctionSemanticAnalyzer.java | 10 +-
.../hadoop/hive/ql/parse/GenTezUtils.java | 6 +-
.../apache/hadoop/hive/ql/parse/GenTezWork.java | 10 +-
.../apache/hadoop/hive/ql/parse/HiveParser.g | 5 -
.../hive/ql/parse/ImportSemanticAnalyzer.java | 4 +
.../hadoop/hive/ql/parse/InputSignature.java | 6 +-
.../hive/ql/parse/MacroSemanticAnalyzer.java | 8 +-
.../hadoop/hive/ql/parse/MapReduceCompiler.java | 6 +-
.../hive/ql/parse/MetaDataExportListener.java | 6 +-
.../hadoop/hive/ql/parse/PTFTranslator.java | 6 +-
.../hadoop/hive/ql/parse/ParseDriver.java | 6 +-
.../hive/ql/parse/ProcessAnalyzeTable.java | 6 +-
.../org/apache/hadoop/hive/ql/parse/QB.java | 6 +-
.../org/apache/hadoop/hive/ql/parse/QBExpr.java | 6 +-
.../apache/hadoop/hive/ql/parse/QBMetaData.java | 6 +-
.../hadoop/hive/ql/parse/QBParseInfo.java | 6 +-
.../hadoop/hive/ql/parse/RowResolver.java | 6 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 80 +-
.../hive/ql/parse/TableAccessAnalyzer.java | 6 +-
.../hadoop/hive/ql/parse/TaskCompiler.java | 6 +-
.../hadoop/hive/ql/parse/TezCompiler.java | 7 +-
.../hadoop/hive/ql/parse/TypeCheckCtx.java | 8 +-
.../hive/ql/parse/TypeCheckProcFactory.java | 11 +-
.../hadoop/hive/ql/parse/UnionProcessor.java | 6 +-
.../hive/ql/parse/spark/GenSparkUtils.java | 6 +-
.../hive/ql/parse/spark/GenSparkWork.java | 6 +-
.../hive/ql/parse/spark/SparkCompiler.java | 6 +-
.../ql/parse/spark/SparkFileSinkProcessor.java | 8 +-
.../SparkPartitionPruningSinkOperator.java | 6 +-
.../parse/spark/SparkProcessAnalyzeTable.java | 6 +-
.../apache/hadoop/hive/ql/plan/BaseWork.java | 10 +-
.../ql/plan/ConditionalResolverCommonJoin.java | 6 +-
.../hadoop/hive/ql/plan/CreateTableDesc.java | 8 +-
.../hive/ql/plan/ExprNodeGenericFuncDesc.java | 15 +-
.../apache/hadoop/hive/ql/plan/FilterDesc.java | 9 -
.../apache/hadoop/hive/ql/plan/MapJoinDesc.java | 4 +-
.../org/apache/hadoop/hive/ql/plan/MapWork.java | 8 +-
.../org/apache/hadoop/hive/ql/plan/PTFDesc.java | 6 +-
.../apache/hadoop/hive/ql/plan/PlanUtils.java | 44 +-
.../hadoop/hive/ql/plan/ReduceSinkDesc.java | 23 +-
.../apache/hadoop/hive/ql/plan/ReduceWork.java | 8 +-
.../hadoop/hive/ql/plan/TableScanDesc.java | 7 +
.../org/apache/hadoop/hive/ql/plan/TezWork.java | 6 +-
.../hive/ql/plan/VectorReduceSinkDesc.java | 64 +
.../hive/ql/plan/VectorReduceSinkInfo.java | 123 +
.../hadoop/hive/ql/ppd/ExprWalkerInfo.java | 6 +-
.../hive/ql/ppd/ExprWalkerProcFactory.java | 18 +-
.../hadoop/hive/ql/ppd/OpProcFactory.java | 24 +-
.../hadoop/hive/ql/ppd/PredicatePushDown.java | 6 +-
.../hive/ql/ppd/SyntheticJoinPredicate.java | 6 +-
.../ql/processors/AddResourceProcessor.java | 6 +-
.../ql/processors/CommandProcessorFactory.java | 6 +-
.../hadoop/hive/ql/processors/CommandUtil.java | 6 +-
.../hive/ql/processors/CompileProcessor.java | 6 +-
.../hive/ql/processors/CryptoProcessor.java | 6 +-
.../ql/processors/DeleteResourceProcessor.java | 6 +-
.../hadoop/hive/ql/processors/DfsProcessor.java | 6 +-
.../hive/ql/processors/ReloadProcessor.java | 6 +-
.../AuthorizationPreEventListener.java | 6 +-
.../HiveAuthorizationProviderBase.java | 6 +-
.../StorageBasedAuthorizationProvider.java | 6 +-
.../AuthorizationMetaStoreFilterHook.java | 10 +-
.../sqlstd/DummyHiveAuthorizationValidator.java | 6 +-
.../plugin/sqlstd/Operation2Privilege.java | 12 +-
.../plugin/sqlstd/SQLAuthorizationUtils.java | 6 +-
.../sqlstd/SQLStdHiveAccessController.java | 6 +-
.../SQLStdHiveAuthorizationValidator.java | 6 +-
.../hive/ql/session/DependencyResolver.java | 8 +-
.../hadoop/hive/ql/session/OperationLog.java | 12 +-
.../hadoop/hive/ql/session/SessionState.java | 20 +-
.../hive/ql/stats/CounterStatsAggregator.java | 6 +-
.../ql/stats/CounterStatsAggregatorSpark.java | 6 +-
.../ql/stats/CounterStatsAggregatorTez.java | 6 +-
.../hive/ql/stats/CounterStatsPublisher.java | 6 +-
.../hadoop/hive/ql/stats/StatsFactory.java | 6 +-
.../apache/hadoop/hive/ql/stats/StatsUtils.java | 77 +-
.../hive/ql/stats/fs/FSStatsAggregator.java | 10 +-
.../hive/ql/stats/fs/FSStatsPublisher.java | 12 +-
.../hive/ql/txn/AcidHouseKeeperService.java | 10 +-
.../hadoop/hive/ql/txn/compactor/Cleaner.java | 6 +-
.../hive/ql/txn/compactor/CompactorMR.java | 6 +-
.../hive/ql/txn/compactor/CompactorThread.java | 6 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 8 +-
.../org/apache/hadoop/hive/ql/udf/UDFE.java | 6 +-
.../org/apache/hadoop/hive/ql/udf/UDFPI.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFAverage.java | 6 +-
.../ql/udf/generic/GenericUDAFComputeStats.java | 22 +-
.../udf/generic/GenericUDAFContextNGrams.java | 6 +-
.../ql/udf/generic/GenericUDAFEWAHBitmap.java | 6 +-
.../ql/udf/generic/GenericUDAFFirstValue.java | 6 +-
.../generic/GenericUDAFHistogramNumeric.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFLag.java | 6 +-
.../ql/udf/generic/GenericUDAFLastValue.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFLead.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFLeadLag.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFMax.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFMin.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFNTile.java | 6 +-
.../ql/udf/generic/GenericUDAFPercentRank.java | 6 +-
.../generic/GenericUDAFPercentileApprox.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFRank.java | 6 +-
.../ql/udf/generic/GenericUDAFRowNumber.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFSum.java | 6 +-
.../ql/udf/generic/GenericUDAFVariance.java | 6 +-
.../hive/ql/udf/generic/GenericUDAFnGrams.java | 6 +-
.../udf/generic/GenericUDFFromUtcTimestamp.java | 6 +-
.../hive/ql/udf/generic/GenericUDFRegExp.java | 8 +-
.../ql/udf/generic/GenericUDFTimestamp.java | 4 +-
.../hive/ql/udf/generic/GenericUDFToChar.java | 6 +-
.../ql/udf/generic/GenericUDFToVarchar.java | 6 +-
.../ql/udf/generic/GenericUDFUnixTimeStamp.java | 6 +-
.../ql/udf/generic/GenericUDTFJSONTuple.java | 6 +-
.../udf/generic/GenericUDTFParseUrlTuple.java | 6 +-
.../hive/ql/udf/generic/NGramEstimator.java | 4 +-
.../udf/generic/NumDistinctValueEstimator.java | 14 +-
.../hive/ql/udf/ptf/WindowingTableFunction.java | 6 +-
.../hive/ql/util/ZooKeeperHiveHelper.java | 6 +-
.../hadoop/hive/ql/exec/TestExecDriver.java | 6 +-
.../hive/ql/exec/TestFileSinkOperator.java | 6 +-
.../hive/ql/exec/TestFunctionRegistry.java | 2 +-
.../hadoop/hive/ql/exec/TestUtilities.java | 6 +-
.../TestMapJoinMemoryExhaustionHandler.java | 6 +-
.../session/TestSparkSessionManagerImpl.java | 6 +-
.../hive/ql/exec/tez/TestTezSessionPool.java | 6 +-
.../hive/ql/exec/vector/TestVectorSerDeRow.java | 19 +-
.../exec/vector/TestVectorizationContext.java | 6 +-
.../hive/ql/exec/vector/UDFHelloTest.java | 69 +
.../expressions/TestVectorDateExpressions.java | 71 +-
.../hadoop/hive/ql/io/TestAcidInputFormat.java | 88 +
.../apache/hadoop/hive/ql/io/TestRCFile.java | 6 +-
.../hive/ql/io/TestSymlinkTextInputFormat.java | 8 +-
.../hive/ql/io/orc/TestOrcRawRecordMerger.java | 6 +-
.../hive/ql/lockmgr/TestDbTxnManager.java | 13 +-
.../hive/ql/lockmgr/TestDummyTxnManager.java | 10 +-
.../hadoop/hive/ql/log/TestLog4j2Appenders.java | 2 +-
.../parse/TestUpdateDeleteSemanticAnalyzer.java | 6 +-
.../hive/ql/session/TestSessionState.java | 6 +-
.../hive/ql/txn/compactor/CompactorTest.java | 6 +-
.../hive/ql/txn/compactor/TestCleaner.java | 6 +-
.../hive/ql/txn/compactor/TestInitiator.java | 6 +-
.../hive/ql/txn/compactor/TestWorker.java | 6 +-
.../clientnegative/authorization_import.q | 39 +
.../column_change_skewedcol_type1.q | 2 -
.../queries/clientnegative/column_rename5.q | 2 -
...te_skewed_table_col_name_value_no_mismatch.q | 2 -
.../create_skewed_table_dup_col_name.q | 2 -
...eate_skewed_table_failure_invalid_col_name.q | 3 -
.../disallow_incompatible_type_change_on1.q | 6 +-
.../clientnegative/drop_database_cascade.q | 26 +
.../queries/clientnegative/invalid_config1.q | 3 -
.../queries/clientnegative/invalid_config2.q | 4 -
.../clientnegative/load_stored_as_dirs.q | 2 -
.../set_hiveconf_internal_variable0.q | 4 +
.../set_hiveconf_internal_variable1.q | 4 +
.../truncate_column_list_bucketing.q | 1 -
.../clientpositive/add_jar_with_file_removed.q | 15 +
ql/src/test/queries/clientpositive/alter1.q | 6 +-
.../queries/clientpositive/alter_skewed_table.q | 2 -
.../queries/clientpositive/avro_partitioned.q | 3 +-
.../cbo_rp_annotate_stats_groupby.q | 141 +
.../clientpositive/cbo_rp_unionDistinct_2.q | 128 +
.../test/queries/clientpositive/cbo_udf_max.q | 36 +
.../columnarserde_create_shortcut.q | 2 +
.../create_alter_list_bucketing_table1.q | 2 -
.../clientpositive/create_skewed_table1.q | 1 -
.../test/queries/clientpositive/explain_ddl.q | 28 +
.../test/queries/clientpositive/explainuser_3.q | 46 +-
.../clientpositive/groupby_grouping_id3.q | 22 +
.../queries/clientpositive/groupby_sort_8.q | 6 -
.../clientpositive/groupby_sort_test_1.q | 1 -
.../infer_bucket_sort_list_bucket.q | 3 +-
ql/src/test/queries/clientpositive/input3.q | 10 +-
.../queries/clientpositive/insert_dir_distcp.q | 9 +
.../clientpositive/insert_values_nonascii.q | 9 +
.../clientpositive/insertoverwrite_bucket.q | 9 +
.../test/queries/clientpositive/lb_fs_stats.q | 1 -
ql/src/test/queries/clientpositive/lineage2.q | 18 +
ql/src/test/queries/clientpositive/lineage3.q | 3 +-
.../queries/clientpositive/list_bucket_dml_1.q | 1 -
.../queries/clientpositive/list_bucket_dml_10.q | 2 -
.../queries/clientpositive/list_bucket_dml_11.q | 1 -
.../queries/clientpositive/list_bucket_dml_12.q | 1 -
.../queries/clientpositive/list_bucket_dml_13.q | 1 -
.../queries/clientpositive/list_bucket_dml_14.q | 1 -
.../queries/clientpositive/list_bucket_dml_2.q | 1 -
.../queries/clientpositive/list_bucket_dml_3.q | 1 -
.../queries/clientpositive/list_bucket_dml_4.q | 1 -
.../queries/clientpositive/list_bucket_dml_5.q | 1 -
.../queries/clientpositive/list_bucket_dml_6.q | 1 -
.../queries/clientpositive/list_bucket_dml_7.q | 1 -
.../queries/clientpositive/list_bucket_dml_8.q | 1 -
.../queries/clientpositive/list_bucket_dml_9.q | 1 -
.../list_bucket_query_multiskew_1.q | 1 -
.../list_bucket_query_multiskew_2.q | 1 -
.../list_bucket_query_multiskew_3.q | 1 -
.../list_bucket_query_oneskew_1.q | 1 -
.../list_bucket_query_oneskew_2.q | 1 -
.../list_bucket_query_oneskew_3.q | 1 -
.../queries/clientpositive/macro_duplicate.q | 2 +-
ql/src/test/queries/clientpositive/mrr.q | 2 +
.../queries/clientpositive/non_ascii_literal1.q | 1 +
.../queries/clientpositive/non_ascii_literal2.q | 5 +
.../clientpositive/orc_int_type_promotion.q | 2 +
.../clientpositive/parquet_schema_evolution.q | 6 +-
.../partition_wise_fileformat11.q | 4 +-
.../partition_wise_fileformat12.q | 4 +-
.../partition_wise_fileformat13.q | 5 +-
.../partition_wise_fileformat15.q | 4 +-
.../partition_wise_fileformat16.q | 4 +-
ql/src/test/queries/clientpositive/pcs.q | 66 +
.../test/queries/clientpositive/pointlookup.q | 6 +-
.../test/queries/clientpositive/pointlookup2.q | 2 +-
.../test/queries/clientpositive/pointlookup3.q | 2 +-
.../test/queries/clientpositive/pointlookup4.q | 27 +
.../test/queries/clientpositive/quotedid_skew.q | 1 -
.../test/queries/clientpositive/recursive_dir.q | 1 -
.../test/queries/clientpositive/rename_column.q | 4 +-
.../queries/clientpositive/skewjoin_mapjoin1.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin10.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin11.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin2.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin3.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin4.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin5.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin6.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin7.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin8.q | 1 -
.../queries/clientpositive/skewjoin_mapjoin9.q | 1 -
.../clientpositive/skewjoin_union_remove_1.q | 1 -
.../clientpositive/skewjoin_union_remove_2.q | 1 -
.../test/queries/clientpositive/skewjoinopt1.q | 1 -
.../test/queries/clientpositive/skewjoinopt10.q | 1 -
.../test/queries/clientpositive/skewjoinopt11.q | 1 -
.../test/queries/clientpositive/skewjoinopt12.q | 1 -
.../test/queries/clientpositive/skewjoinopt13.q | 1 -
.../test/queries/clientpositive/skewjoinopt14.q | 1 -
.../test/queries/clientpositive/skewjoinopt15.q | 1 -
.../test/queries/clientpositive/skewjoinopt16.q | 1 -
.../test/queries/clientpositive/skewjoinopt17.q | 3 +-
.../test/queries/clientpositive/skewjoinopt18.q | 1 -
.../test/queries/clientpositive/skewjoinopt19.q | 1 -
.../test/queries/clientpositive/skewjoinopt2.q | 1 -
.../test/queries/clientpositive/skewjoinopt20.q | 1 -
.../test/queries/clientpositive/skewjoinopt3.q | 1 -
.../test/queries/clientpositive/skewjoinopt4.q | 1 -
.../test/queries/clientpositive/skewjoinopt5.q | 1 -
.../test/queries/clientpositive/skewjoinopt6.q | 1 -
.../test/queries/clientpositive/skewjoinopt7.q | 1 -
.../test/queries/clientpositive/skewjoinopt8.q | 1 -
.../test/queries/clientpositive/skewjoinopt9.q | 1 -
.../queries/clientpositive/stats_list_bucket.q | 2 -
.../queries/clientpositive/struct_in_view.q | 28 +
.../test/queries/clientpositive/tez_smb_empty.q | 55 +
.../queries/clientpositive/tez_union_with_udf.q | 13 +
.../truncate_column_list_bucket.q | 1 -
.../queries/clientpositive/union_remove_1.q | 1 -
.../queries/clientpositive/union_remove_10.q | 1 -
.../queries/clientpositive/union_remove_11.q | 1 -
.../queries/clientpositive/union_remove_12.q | 1 -
.../queries/clientpositive/union_remove_13.q | 1 -
.../queries/clientpositive/union_remove_14.q | 1 -
.../queries/clientpositive/union_remove_15.q | 1 -
.../queries/clientpositive/union_remove_16.q | 1 -
.../queries/clientpositive/union_remove_17.q | 1 -
.../queries/clientpositive/union_remove_18.q | 1 -
.../queries/clientpositive/union_remove_19.q | 1 -
.../queries/clientpositive/union_remove_2.q | 1 -
.../queries/clientpositive/union_remove_20.q | 1 -
.../queries/clientpositive/union_remove_21.q | 1 -
.../queries/clientpositive/union_remove_22.q | 1 -
.../queries/clientpositive/union_remove_23.q | 1 -
.../queries/clientpositive/union_remove_24.q | 1 -
.../queries/clientpositive/union_remove_25.q | 1 -
.../queries/clientpositive/union_remove_3.q | 1 -
.../queries/clientpositive/union_remove_4.q | 1 -
.../queries/clientpositive/union_remove_5.q | 1 -
.../queries/clientpositive/union_remove_6.q | 1 -
.../clientpositive/union_remove_6_subq.q | 1 -
.../queries/clientpositive/union_remove_7.q | 1 -
.../queries/clientpositive/union_remove_8.q | 1 -
.../queries/clientpositive/union_remove_9.q | 1 -
.../vector_custom_udf_configure.q | 11 +
.../queries/clientpositive/vector_reduce1.q | 47 +
.../queries/clientpositive/vector_reduce2.q | 47 +
.../queries/clientpositive/vector_reduce3.q | 47 +
.../queries/clientpositive/vectorized_case.q | 19 +
.../clientnegative/authorization_import.q.out | 48 +
.../authorization_uri_import.q.out | 29 +
.../disallow_incompatible_type_change_on1.q.out | 3 +-
.../clientnegative/drop_database_cascade.q.out | 85 +
.../clientnegative/exchange_partition.q.out | 2 +
.../clientnegative/invalid_config1.q.out | 2 -
.../clientnegative/invalid_config2.q.out | 2 -
.../set_hiveconf_internal_variable0.q.out | 11 +
.../set_hiveconf_internal_variable1.q.out | 11 +
.../test/results/clientpositive/acid_join.q.out | 2 +-
.../add_jar_with_file_removed.q.out | 27 +
.../alter_partition_change_col.q.out | 240 +-
.../clientpositive/alter_table_cascade.q.out | 40 +-
.../annotate_stats_deep_filters.q.out | 2 +-
.../clientpositive/annotate_stats_filter.q.out | 48 +-
.../clientpositive/annotate_stats_groupby.q.out | 56 +-
.../annotate_stats_groupby2.q.out | 32 +-
.../clientpositive/annotate_stats_join.q.out | 34 +-
.../annotate_stats_join_pkfk.q.out | 50 +-
.../clientpositive/annotate_stats_limit.q.out | 8 +-
.../clientpositive/annotate_stats_part.q.out | 14 +-
.../clientpositive/annotate_stats_select.q.out | 24 +-
.../clientpositive/annotate_stats_table.q.out | 12 +-
.../clientpositive/annotate_stats_union.q.out | 20 +-
.../clientpositive/ansi_sql_arithmetic.q.out | 2 +-
.../clientpositive/auto_sortmerge_join_10.q.out | 100 +-
.../results/clientpositive/avro_decimal.q.out | 10 +-
.../clientpositive/avro_decimal_native.q.out | 10 +-
.../results/clientpositive/bucket_groupby.q.out | 46 +-
.../bucketizedhiveinputformat.q.out | 2 +
.../clientpositive/cast_qualified_types.q.out | 2 +-
.../cbo_rp_annotate_stats_groupby.q.out | 1301 ++
.../clientpositive/cbo_rp_auto_join0.q.out | 8 +-
.../clientpositive/cbo_rp_auto_join1.q.out | 30 +-
.../results/clientpositive/cbo_rp_join0.q.out | 14 +-
.../clientpositive/cbo_rp_lineage2.q.out | 68 +-
.../clientpositive/cbo_rp_unionDistinct_2.q.out | 545 +
.../results/clientpositive/cbo_udf_max.q.out | 62 +
.../results/clientpositive/decimal_1_1.q.out | 48 +-
.../test/results/clientpositive/decimal_3.q.out | 514 +-
.../test/results/clientpositive/decimal_4.q.out | 144 +-
.../test/results/clientpositive/decimal_5.q.out | 180 +-
.../test/results/clientpositive/decimal_6.q.out | 92 +-
.../results/clientpositive/decimal_join2.q.out | 260 +-
.../clientpositive/decimal_precision.q.out | 170 +-
.../clientpositive/decimal_trailing.q.out | 42 +-
.../results/clientpositive/decimal_udf.q.out | 960 +-
.../dynpart_sort_optimization_acid.q.out | 4 +-
.../clientpositive/exchange_partition.q.out | 6 +
.../clientpositive/exchange_partition2.q.out | 6 +
.../clientpositive/exchange_partition3.q.out | 8 +
.../clientpositive/exchgpartition2lel.q.out | 18 +
.../clientpositive/exim_00_nonpart_empty.q.out | 2 +
.../clientpositive/exim_01_nonpart.q.out | 2 +
.../clientpositive/exim_02_00_part_empty.q.out | 2 +
.../results/clientpositive/exim_02_part.q.out | 2 +
.../clientpositive/exim_04_all_part.q.out | 2 +
.../clientpositive/exim_04_evolved_parts.q.out | 2 +
.../clientpositive/exim_05_some_part.q.out | 2 +
.../clientpositive/exim_06_one_part.q.out | 2 +
.../clientpositive/exim_08_nonpart_rename.q.out | 2 +
.../exim_10_external_managed.q.out | 2 +
.../exim_11_managed_external.q.out | 2 +
.../exim_12_external_location.q.out | 2 +
.../exim_13_managed_location.q.out | 2 +
.../clientpositive/exim_18_part_external.q.out | 2 +
.../exim_19_00_part_external_location.q.out | 2 +
.../exim_19_part_external_location.q.out | 2 +
.../exim_20_part_managed_location.q.out | 2 +
.../exim_24_import_nonexist_authsuccess.q.out | 2 +
.../clientpositive/exim_hidden_files.q.out | 2 +
.../results/clientpositive/explain_ddl.q.out | 604 +
.../extrapolate_part_stats_full.q.out | 8 +-
.../extrapolate_part_stats_partial.q.out | 12 +-
.../extrapolate_part_stats_partial_ndv.q.out | 6 +-
.../clientpositive/groupby_grouping_id3.q.out | 60 +
.../results/clientpositive/groupby_sort_8.q.out | 64 -
.../clientpositive/groupby_sort_test_1.q.out | 87 +-
.../clientpositive/import_exported_table.q.out | 3 +
.../clientpositive/infer_bucket_sort.q.out | 4 +-
.../clientpositive/insert_dir_distcp.q.out | 14 +
.../insert_nonacid_from_acid.q.out | 20 +-
.../clientpositive/insert_values_nonascii.q.out | 28 +
.../clientpositive/insertoverwrite_bucket.q.out | 78 +
.../test/results/clientpositive/lineage2.q.out | 98 +-
.../test/results/clientpositive/lineage3.q.out | 60 +-
.../clientpositive/llap/constprog_dpp.q.out | 10 +-
.../llap/dynamic_partition_pruning.q.out | 45 -
.../llap/hybridgrace_hashjoin_1.q.out | 204 +-
.../clientpositive/llap/llapdecider.q.out | 46 +-
.../clientpositive/llap/mapjoin_decimal.q.out | 424 +-
.../vectorized_dynamic_partition_pruning.q.out | 45 -
.../clientpositive/load_dyn_part15.q.out | 6 +-
.../clientpositive/macro_duplicate.q.out | 4 +-
.../multi_insert_lateral_view.q.out | 36 +-
.../clientpositive/non_ascii_literal1.q.out | 9 +
.../clientpositive/non_ascii_literal2.q.out | 23 +
.../results/clientpositive/orc_file_dump.q.out | 6 +-
.../clientpositive/orc_predicate_pushdown.q.out | 4 +-
.../clientpositive/parquet_decimal.q.out | 16 +-
.../clientpositive/parquet_ppd_boolean.q.out | 180 +-
.../clientpositive/parquet_ppd_char.q.out | 220 +-
.../clientpositive/parquet_ppd_date.q.out | 330 +-
.../clientpositive/parquet_ppd_decimal.q.out | 660 +-
.../clientpositive/parquet_ppd_timestamp.q.out | 320 +-
.../clientpositive/parquet_ppd_varchar.q.out | 220 +-
.../parquet_predicate_pushdown.q.out | 4 +-
ql/src/test/results/clientpositive/pcs.q.out | 2249 +++
.../results/clientpositive/pointlookup.q.out | 8 +-
.../results/clientpositive/pointlookup4.q.out | 530 +
.../clientpositive/repl_2_exim_basic.q.out | 4 +
.../results/clientpositive/serde_regex.q.out | 74 +-
.../spark/annotate_stats_join.q.out | 34 +-
.../spark/auto_sortmerge_join_10.q.out | 45 +-
.../spark/avro_decimal_native.q.out | 10 +-
.../spark/bucketizedhiveinputformat.q.out | 2 +
.../clientpositive/spark/decimal_1_1.q.out | 48 +-
.../spark/import_exported_table.q.out | 3 +
.../clientpositive/spark/load_dyn_part15.q.out | 6 +-
.../clientpositive/spark/mapjoin_decimal.q.out | 424 +-
.../spark/multi_insert_lateral_view.q.out | 36 +-
.../spark/union_lateralview.q.out | 4 +-
.../spark/vector_between_in.q.out | 14 +-
.../spark/vector_cast_constant.q.java1.7.out | 20 +-
.../spark/vector_data_types.q.out | 4 +-
.../spark/vector_decimal_aggregate.q.out | 32 +-
.../spark/vector_decimal_mapjoin.q.out | 212 +-
.../clientpositive/spark/vectorized_case.q.out | 109 +-
.../results/clientpositive/stats_ppr_all.q.out | 8 +-
.../results/clientpositive/struct_in_view.q.out | 118 +
.../clientpositive/sum_expr_with_order.q.out | 2 +-
.../tez/auto_sortmerge_join_10.q.out | 71 +-
.../tez/dynamic_partition_pruning.q.out | 45 -
.../clientpositive/tez/explainuser_1.q.out | 368 +-
.../clientpositive/tez/explainuser_2.q.out | 38 +
.../clientpositive/tez/explainuser_3.q.out | 230 +-
.../tez/hybridgrace_hashjoin_1.q.out | 204 +-
.../clientpositive/tez/insert_dir_distcp.q.out | 14 +
.../clientpositive/tez/llapdecider.q.out | 46 +-
.../clientpositive/tez/mapjoin_decimal.q.out | 424 +-
.../clientpositive/tez/tez_smb_empty.q.out | 676 +
.../clientpositive/tez/tez_union_with_udf.q.out | 36 +
.../clientpositive/tez/update_all_types.q.out | 30 +-
.../clientpositive/tez/vector_aggregate_9.q.out | 2 +-
.../tez/vector_aggregate_without_gby.q.out | 85 +
.../tez/vector_auto_smb_mapjoin_14.q.out | 32 +-
.../clientpositive/tez/vector_between_in.q.out | 14 +-
.../clientpositive/tez/vector_bround.q.out | 66 +
.../tez/vector_cast_constant.q.java1.7.out | 20 +-
.../clientpositive/tez/vector_data_types.q.out | 4 +-
.../clientpositive/tez/vector_decimal_2.q.out | 4 +-
.../clientpositive/tez/vector_decimal_3.q.out | 514 +-
.../clientpositive/tez/vector_decimal_4.q.out | 288 +-
.../clientpositive/tez/vector_decimal_5.q.out | 180 +-
.../clientpositive/tez/vector_decimal_6.q.out | 172 +-
.../tez/vector_decimal_aggregate.q.out | 32 +-
.../tez/vector_decimal_cast.q.out | 20 +-
.../tez/vector_decimal_expressions.q.out | 20 +-
.../tez/vector_decimal_mapjoin.q.out | 212 +-
.../tez/vector_decimal_precision.q.out | 170 +-
.../tez/vector_decimal_round_2.q.out | 14 +-
.../tez/vector_decimal_trailing.q.out | 42 +-
.../clientpositive/tez/vector_decimal_udf.q.out | 960 +-
.../results/clientpositive/tez/vector_nvl.q.out | 194 +
.../clientpositive/tez/vector_reduce1.q.out | 2167 +++
.../clientpositive/tez/vector_reduce2.q.out | 2167 +++
.../clientpositive/tez/vector_reduce3.q.out | 2167 +++
.../tez/vector_reduce_groupby_decimal.q.out | 98 +-
.../clientpositive/tez/vector_struct_in.q.out | 645 +
.../tez/vectorization_part_varchar.q.out | 72 +
.../clientpositive/tez/vectorized_case.q.out | 109 +-
.../tez/vectorized_distinct_gby.q.out | 4 +-
.../vectorized_dynamic_partition_pruning.q.out | 45 -
.../tez/vectorized_parquet_types.q.out | 151 +-
.../tez/vectorized_timestamp_ints_casts.q.out | 50 +-
.../clientpositive/union_lateralview.q.out | 4 +-
.../clientpositive/update_all_types.q.out | 30 +-
.../clientpositive/vector_aggregate_9.q.out | 2 +-
.../clientpositive/vector_between_in.q.out | 14 +-
.../vector_cast_constant.q.java1.7.out | 20 +-
.../vector_custom_udf_configure.q.out | 70 +
.../clientpositive/vector_data_types.q.out | 4 +-
.../clientpositive/vector_decimal_2.q.out | 4 +-
.../clientpositive/vector_decimal_3.q.out | 514 +-
.../clientpositive/vector_decimal_4.q.out | 288 +-
.../clientpositive/vector_decimal_5.q.out | 180 +-
.../clientpositive/vector_decimal_6.q.out | 172 +-
.../vector_decimal_aggregate.q.out | 32 +-
.../clientpositive/vector_decimal_cast.q.out | 20 +-
.../vector_decimal_expressions.q.out | 20 +-
.../clientpositive/vector_decimal_mapjoin.q.out | 212 +-
.../vector_decimal_precision.q.out | 170 +-
.../clientpositive/vector_decimal_round_2.q.out | 14 +-
.../vector_decimal_trailing.q.out | 42 +-
.../clientpositive/vector_decimal_udf.q.out | 960 +-
.../results/clientpositive/vector_reduce1.q.out | 2160 +++
.../results/clientpositive/vector_reduce2.q.out | 2160 +++
.../results/clientpositive/vector_reduce3.q.out | 2160 +++
.../vector_reduce_groupby_decimal.q.out | 98 +-
.../clientpositive/vectorized_case.q.out | 69 +
.../vectorized_distinct_gby.q.out | 4 +-
.../clientpositive/windowing_decimal.q.out | 104 +-
.../clientpositive/windowing_navfn.q.out | 20 +-
.../results/clientpositive/windowing_rank.q.out | 60 +-
.../clientpositive/windowing_windowspec3.q.out | 18 +-
serde/pom.xml | 5 -
.../hive/serde2/AbstractEncodingAwareSerDe.java | 6 +-
.../hadoop/hive/serde2/AbstractSerDe.java | 9 +
.../hive/serde2/ColumnProjectionUtils.java | 15 +-
.../hadoop/hive/serde2/DelimitedJSONSerDe.java | 6 +-
.../serde2/MetadataTypedColumnsetSerDe.java | 8 +-
.../apache/hadoop/hive/serde2/OpenCSVSerde.java | 6 +-
.../apache/hadoop/hive/serde2/RegexSerDe.java | 6 +-
.../apache/hadoop/hive/serde2/SerDeUtils.java | 9 +-
.../apache/hadoop/hive/serde2/WriteBuffers.java | 58 +-
.../hive/serde2/avro/AvroDeserializer.java | 6 +-
.../serde2/avro/AvroLazyObjectInspector.java | 8 +-
.../hadoop/hive/serde2/avro/AvroSerDe.java | 25 +-
.../hadoop/hive/serde2/avro/AvroSerdeUtils.java | 6 +-
.../hadoop/hive/serde2/avro/AvroSerializer.java | 4 +-
.../hadoop/hive/serde2/avro/InstanceCache.java | 6 +-
.../binarysortable/BinarySortableSerDe.java | 6 +-
.../fast/BinarySortableDeserializeRead.java | 33 +-
.../fast/BinarySortableSerializeWrite.java | 17 +-
.../hive/serde2/columnar/ColumnarSerDe.java | 8 +-
.../hive/serde2/columnar/ColumnarStruct.java | 6 +-
.../hive/serde2/dynamic_type/DynamicSerDe.java | 6 +-
.../hive/serde2/fast/DeserializeRead.java | 6 +-
.../hadoop/hive/serde2/fast/SerializeWrite.java | 2 +-
.../hadoop/hive/serde2/io/DateWritable.java | 1 +
.../serde2/io/HiveIntervalDayTimeWritable.java | 6 +-
.../io/HiveIntervalYearMonthWritable.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyBinary.java | 8 +-
.../hadoop/hive/serde2/lazy/LazyDate.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyDouble.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyFloat.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyHiveChar.java | 6 +-
.../hive/serde2/lazy/LazyHiveDecimal.java | 10 +-
.../hive/serde2/lazy/LazyHiveVarchar.java | 6 +-
.../apache/hadoop/hive/serde2/lazy/LazyMap.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyPrimitive.java | 6 +-
.../hive/serde2/lazy/LazySerDeParameters.java | 6 +-
.../hive/serde2/lazy/LazySimpleSerDe.java | 5 -
.../hadoop/hive/serde2/lazy/LazyStruct.java | 6 +-
.../hadoop/hive/serde2/lazy/LazyTimestamp.java | 10 +-
.../hadoop/hive/serde2/lazy/LazyUtils.java | 3 +-
.../lazy/fast/LazySimpleDeserializeRead.java | 171 +-
.../lazy/fast/LazySimpleSerializeWrite.java | 24 +-
.../LazyListObjectInspector.java | 6 +-
.../objectinspector/LazyMapObjectInspector.java | 6 +-
.../LazyUnionObjectInspector.java | 8 +-
.../hive/serde2/lazybinary/LazyBinaryDate.java | 6 +-
.../LazyBinaryHiveIntervalDayTime.java | 6 +-
.../LazyBinaryHiveIntervalYearMonth.java | 6 +-
.../hive/serde2/lazybinary/LazyBinaryMap.java | 6 +-
.../hive/serde2/lazybinary/LazyBinarySerDe.java | 6 +-
.../serde2/lazybinary/LazyBinaryStruct.java | 6 +-
.../serde2/lazybinary/LazyBinaryTimestamp.java | 6 +-
.../hive/serde2/lazybinary/LazyBinaryUnion.java | 6 +-
.../hive/serde2/lazybinary/LazyBinaryUtils.java | 2 +-
.../fast/LazyBinaryDeserializeRead.java | 31 +-
.../fast/LazyBinarySerializeWrite.java | 8 +-
.../objectinspector/ObjectInspectorUtils.java | 32 +-
.../StandardStructObjectInspector.java | 8 +-
.../PrimitiveObjectInspectorUtils.java | 6 +-
.../WritableHiveVarcharObjectInspector.java | 6 +-
.../serde2/thrift/TBinarySortableProtocol.java | 6 +-
.../serde2/thrift/TCTLSeparatedProtocol.java | 6 +-
.../hive/serde2/typeinfo/TypeInfoUtils.java | 98 +-
.../apache/hadoop/hive/serde2/VerifyFast.java | 9 +-
.../hive/serde2/avro/TestTypeInfoToSchema.java | 7 +-
.../binarysortable/TestBinarySortableFast.java | 3 +-
.../hive/serde2/lazy/TestLazySimpleFast.java | 3 +-
.../serde2/lazybinary/TestLazyBinaryFast.java | 3 +-
service/pom.xml | 5 -
.../apache/hive/service/AbstractService.java | 6 +-
.../apache/hive/service/CompositeService.java | 6 +-
.../org/apache/hive/service/CookieSigner.java | 6 +-
.../apache/hive/service/ServiceOperations.java | 6 +-
.../org/apache/hive/service/ServiceUtils.java | 25 +
.../hive/service/auth/HiveAuthFactory.java | 8 +-
.../apache/hive/service/auth/HttpAuthUtils.java | 6 +-
.../auth/LdapAuthenticationProviderImpl.java | 6 +-
.../org/apache/hive/service/cli/CLIService.java | 8 +-
.../cli/operation/GetTablesOperation.java | 47 +-
.../cli/operation/HiveCommandOperation.java | 10 +-
.../cli/operation/LogDivertAppender.java | 7 +-
.../cli/operation/MetadataOperation.java | 23 +-
.../hive/service/cli/operation/Operation.java | 6 +-
.../service/cli/operation/OperationManager.java | 9 +-
.../service/cli/session/HiveSessionImpl.java | 33 +-
.../cli/session/HiveSessionImplwithUGI.java | 6 +-
.../service/cli/session/SessionManager.java | 6 +-
.../thrift/RetryingThriftCLIServiceClient.java | 6 +-
.../cli/thrift/ThriftBinaryCLIService.java | 2 +-
.../service/cli/thrift/ThriftCLIService.java | 6 +-
.../cli/thrift/ThriftHttpCLIService.java | 3 +-
.../service/cli/thrift/ThriftHttpServlet.java | 6 +-
.../apache/hive/service/server/HiveServer2.java | 17 +-
.../server/ThreadWithGarbageCleanup.java | 6 +-
.../apache/hive/service/cli/CLIServiceTest.java | 6 +-
shims/0.23/pom.xml | 5 -
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 3 +
.../apache/hadoop/mapred/WebHCatJTShim23.java | 10 +-
shims/common/pom.xml | 16 +-
.../org/apache/hadoop/fs/DefaultFileAccess.java | 6 +-
.../apache/hadoop/hive/shims/HadoopShims.java | 18 +-
.../hadoop/hive/shims/HadoopShimsSecure.java | 6 +-
.../apache/hadoop/hive/thrift/DBTokenStore.java | 7 +-
.../hive/thrift/HadoopThriftAuthBridge.java | 12 +-
.../hadoop/hive/thrift/ZooKeeperTokenStore.java | 2 +-
shims/scheduler/pom.xml | 5 -
.../hadoop/hive/schshim/FairSchedulerShim.java | 6 +-
.../hive/spark/client/SparkClientUtilities.java | 7 +-
.../hive/spark/counter/SparkCounters.java | 6 +-
.../hadoop/hive/common/io/DiskRangeList.java | 6 +-
.../hadoop/hive/common/type/HiveDecimal.java | 11 +
.../hive/ql/exec/vector/ColumnVector.java | 3 +-
.../ql/exec/vector/DecimalColumnVector.java | 2 -
.../hive/ql/io/sarg/SearchArgumentImpl.java | 5 -
.../hive/serde2/io/HiveDecimalWritable.java | 4 -
testutils/ptest2/pom.xml | 5 -
1287 files changed, 57004 insertions(+), 21783 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 2ab9c2d,6951993..6abef4e
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@@ -24,9 -23,8 +24,10 @@@ import java.util.List
import java.util.Map;
import java.util.Set;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper;
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
index 52f4b9c,84603d5..09c54c1
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/JobMetricsListener.java
@@@ -21,19 -21,33 +21,20 @@@ import java.util.Iterator
import java.util.List;
import java.util.Map;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
+import org.apache.spark.JavaSparkListener;
import org.apache.spark.executor.TaskMetrics;
-import org.apache.spark.scheduler.SparkListener;
-import org.apache.spark.scheduler.SparkListenerApplicationEnd;
-import org.apache.spark.scheduler.SparkListenerApplicationStart;
-import org.apache.spark.scheduler.SparkListenerBlockManagerAdded;
-import org.apache.spark.scheduler.SparkListenerBlockManagerRemoved;
-import org.apache.spark.scheduler.SparkListenerEnvironmentUpdate;
-import org.apache.spark.scheduler.SparkListenerExecutorMetricsUpdate;
-import org.apache.spark.scheduler.SparkListenerJobEnd;
import org.apache.spark.scheduler.SparkListenerJobStart;
-import org.apache.spark.scheduler.SparkListenerStageCompleted;
-import org.apache.spark.scheduler.SparkListenerStageSubmitted;
import org.apache.spark.scheduler.SparkListenerTaskEnd;
-import org.apache.spark.scheduler.SparkListenerTaskGettingResult;
-import org.apache.spark.scheduler.SparkListenerTaskStart;
-import org.apache.spark.scheduler.SparkListenerUnpersistRDD;
-import org.apache.spark.scheduler.SparkListenerExecutorRemoved;
-import org.apache.spark.scheduler.SparkListenerExecutorAdded;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-public class JobMetricsListener implements SparkListener {
+public class JobMetricsListener extends JavaSparkListener {
- private static final Log LOG = LogFactory.getLog(JobMetricsListener.class);
+ private static final Logger LOG = LoggerFactory.getLogger(JobMetricsListener.class);
private final Map<Integer, int[]> jobIdToStageId = Maps.newHashMap();
private final Map<Integer, Integer> stageIdToJobId = Maps.newHashMap();
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cad0ea6a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
----------------------------------------------------------------------
diff --cc spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
index bbbd97b,cd38346..b779f3f
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientUtilities.java
@@@ -24,20 -24,18 +24,21 @@@ import java.io.File
import java.net.URL;
import java.net.URLClassLoader;
import java.util.List;
-import java.util.Set;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.lang.StringUtils;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class SparkClientUtilities {
- protected static final transient Log LOG = LogFactory.getLog(SparkClientUtilities.class);
+ protected static final transient Logger LOG = LoggerFactory.getLogger(SparkClientUtilities.class);
+
+ private static final Map<String, Long> downloadedFiles = new ConcurrentHashMap<>();
+
/**
* Add new elements to the classpath.
*
[04/23] hive git commit: HIVE-11180: Enable native vectorized map
join for spark [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
HIVE-11180: Enable native vectorized map join for spark [Spark Branch] (Rui reviewed by Xuefu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/80f548af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/80f548af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/80f548af
Branch: refs/heads/master
Commit: 80f548af3b762abc7775fdfeb21b0d2d9d417c09
Parents: 714b3db
Author: Rui Li <ru...@intel.com>
Authored: Thu Aug 6 13:58:50 2015 +0800
Committer: Rui Li <ru...@intel.com>
Committed: Thu Aug 6 14:09:36 2015 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 4 +-
.../test/resources/testconfiguration.properties | 9 +-
.../persistence/MapJoinTableContainerSerDe.java | 70 +
.../hive/ql/exec/spark/HashTableLoader.java | 18 +-
.../mapjoin/VectorMapJoinCommonOperator.java | 4 +-
.../fast/VectorMapJoinFastTableContainer.java | 2 +-
.../hive/ql/optimizer/physical/Vectorizer.java | 6 +-
.../optimizer/spark/SparkMapJoinOptimizer.java | 10 +
.../spark/vector_inner_join.q.out | 853 +++++++++++
.../spark/vector_outer_join0.q.out | 242 +++
.../spark/vector_outer_join1.q.out | 631 ++++++++
.../spark/vector_outer_join2.q.out | 327 ++++
.../spark/vector_outer_join3.q.out | 630 ++++++++
.../spark/vector_outer_join4.q.out | 1000 +++++++++++++
.../spark/vector_outer_join5.q.out | 1406 ++++++++++++++++++
15 files changed, 5201 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f593d7d..73610dc 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -765,8 +765,8 @@ public class HiveConf extends Configuration {
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
- "Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,\n" +
- "because memory-optimized hashtable cannot be serialized."),
+ "Whether Hive should use memory-optimized hash table for MapJoin.\n" +
+ "Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid" +
"grace hash join as the join method for mapjoin. Tez only."),
HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index c710b0b..b04c5d5 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1181,7 +1181,14 @@ miniSparkOnYarn.query.files=auto_sortmerge_join_16.q,\
stats_counter_partitioned.q,\
temp_table_external.q,\
truncate_column_buckets.q,\
- uber_reduce.q
+ uber_reduce.q,\
+ vector_inner_join.q,\
+ vector_outer_join0.q,\
+ vector_outer_join1.q,\
+ vector_outer_join2.q,\
+ vector_outer_join3.q,\
+ vector_outer_join4.q,\
+ vector_outer_join5.q
spark.query.negative.files=groupby2_map_skew_multi_distinct.q,\
groupby2_multi_distinct.q,\
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
index e97a9f0..d6deabe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
@@ -32,7 +32,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.vector.mapjoin.fast.VectorMapJoinFastTableContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -195,6 +197,74 @@ public class MapJoinTableContainerSerDe {
}
}
+ /**
+ * Loads the small table into a VectorMapJoinFastTableContainer. Only used on Spark path.
+ * @param mapJoinDesc The descriptor for the map join
+ * @param fs FileSystem of the folder.
+ * @param folder The folder to load table container.
+ * @param hconf The hive configuration
+ * @return Loaded table.
+ */
+ @SuppressWarnings("unchecked")
+ public MapJoinTableContainer loadFastContainer(MapJoinDesc mapJoinDesc,
+ FileSystem fs, Path folder, Configuration hconf) throws HiveException {
+ try {
+ if (!fs.isDirectory(folder)) {
+ throw new HiveException("Error, not a directory: " + folder);
+ }
+ FileStatus[] fileStatuses = fs.listStatus(folder);
+ if (fileStatuses == null || fileStatuses.length == 0) {
+ return null;
+ }
+
+ SerDe keySerDe = keyContext.getSerDe();
+ SerDe valueSerDe = valueContext.getSerDe();
+ Writable key = keySerDe.getSerializedClass().newInstance();
+ Writable value = valueSerDe.getSerializedClass().newInstance();
+
+ VectorMapJoinFastTableContainer tableContainer =
+ new VectorMapJoinFastTableContainer(mapJoinDesc, hconf, -1);
+
+ for (FileStatus fileStatus : fileStatuses) {
+ Path filePath = fileStatus.getPath();
+ if (ShimLoader.getHadoopShims().isDirectory(fileStatus)) {
+ throw new HiveException("Error, not a file: " + filePath);
+ }
+ InputStream is = null;
+ ObjectInputStream in = null;
+ try {
+ is = fs.open(filePath, 4096);
+ in = new ObjectInputStream(is);
+ // skip the name and metadata
+ in.readUTF();
+ in.readObject();
+ int numKeys = in.readInt();
+ for (int keyIndex = 0; keyIndex < numKeys; keyIndex++) {
+ key.readFields(in);
+ long numRows = in.readLong();
+ for (long rowIndex = 0L; rowIndex < numRows; rowIndex++) {
+ value.readFields(in);
+ tableContainer.putRow(null, key, null, value);
+ }
+ }
+ } finally {
+ if (in != null) {
+ in.close();
+ } else if (is != null) {
+ is.close();
+ }
+ }
+ }
+
+ tableContainer.seal();
+ return tableContainer;
+ } catch (IOException e) {
+ throw new HiveException("IO error while trying to create table container", e);
+ } catch (Exception e) {
+ throw new HiveException("Error while trying to create table container", e);
+ }
+ }
+
public void persist(ObjectOutputStream out, MapJoinPersistableTableContainer tableContainer)
throws HiveException {
int numKeys = tableContainer.size();
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
index 10e3497..c2462a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.SparkBucketMapJoinContext;
+import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.mapred.JobConf;
@@ -62,6 +63,8 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
private MapJoinOperator joinOp;
private MapJoinDesc desc;
+ private boolean useFastContainer = false;
+
@Override
public void init(ExecMapperContext context, MapredContext mrContext, Configuration hconf,
MapJoinOperator joinOp) {
@@ -69,6 +72,12 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
this.hconf = hconf;
this.joinOp = joinOp;
this.desc = joinOp.getConf();
+ if (desc.getVectorMode() && HiveConf.getBoolVar(
+ hconf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED)) {
+ VectorMapJoinDesc vectorDesc = desc.getVectorDesc();
+ useFastContainer = vectorDesc != null && vectorDesc.hashTableImplementationType() ==
+ VectorMapJoinDesc.HashTableImplementationType.FAST;
+ }
}
@Override
@@ -98,7 +107,7 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
FileSystem fs = FileSystem.get(baseDir.toUri(), hconf);
BucketMapJoinContext mapJoinCtx = localWork.getBucketMapjoinContext();
boolean firstContainer = true;
- boolean useOptimizedContainer = HiveConf.getBoolVar(
+ boolean useOptimizedContainer = !useFastContainer && HiveConf.getBoolVar(
hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE);
for (int pos = 0; pos < mapJoinTables.length; pos++) {
if (pos == desc.getPosBigTable() || mapJoinTables[pos] != null) {
@@ -146,14 +155,17 @@ public class HashTableLoader implements org.apache.hadoop.hive.ql.exec.HashTable
MapJoinTableContainerSerDe mapJoinTableSerde) throws HiveException {
LOG.info("\tLoad back all hashtable files from tmp folder uri:" + path);
if (!SparkUtilities.isDedicatedCluster(hconf)) {
- return mapJoinTableSerde.load(fs, path, hconf);
+ return useFastContainer ? mapJoinTableSerde.loadFastContainer(desc, fs, path, hconf) :
+ mapJoinTableSerde.load(fs, path, hconf);
}
MapJoinTableContainer mapJoinTable = SmallTableCache.get(path);
if (mapJoinTable == null) {
synchronized (path.toString().intern()) {
mapJoinTable = SmallTableCache.get(path);
if (mapJoinTable == null) {
- mapJoinTable = mapJoinTableSerde.load(fs, path, hconf);
+ mapJoinTable = useFastContainer ?
+ mapJoinTableSerde.loadFastContainer(desc, fs, path, hconf) :
+ mapJoinTableSerde.load(fs, path, hconf);
SmallTableCache.cache(path, mapJoinTable);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index 87ebcf2..efad421 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -541,7 +541,9 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
break;
case FAST:
// Use our specialized hash table loader.
- hashTableLoader = new VectorMapJoinFastHashTableLoader();
+ hashTableLoader = HiveConf.getVar(
+ hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark") ?
+ HashTableLoaderFactory.getLoader(hconf) : new VectorMapJoinFastHashTableLoader();
break;
default:
throw new RuntimeException("Unknown vector map join hash table implementation type " + hashTableImplementationType.name());
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
index f2080f4..cf6c0e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java
@@ -195,7 +195,7 @@ public class VectorMapJoinFastTableContainer implements VectorMapJoinTableContai
@Override
public void clear() {
- throw new RuntimeException("Not applicable");
+ // Do nothing
}
@Override
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 82c3e50..4f66cd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -157,6 +157,7 @@ public class Vectorizer implements PhysicalPlanResolver {
private PhysicalContext physicalContext = null;
private HiveConf hiveConf;
+ private boolean isSpark;
public Vectorizer() {
@@ -873,6 +874,7 @@ public class Vectorizer implements PhysicalPlanResolver {
LOG.info("Vectorization is disabled");
return physicalContext;
}
+ isSpark = (HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark"));
// create dispatcher and graph walker
Dispatcher disp = new VectorizationDispatcher(physicalContext);
TaskGraphWalker ogw = new TaskGraphWalker(disp);
@@ -1444,8 +1446,6 @@ public class Vectorizer implements PhysicalPlanResolver {
Operator<? extends OperatorDesc> vectorOp = null;
Class<? extends Operator<?>> opClass = null;
- boolean isOuterJoin = !desc.getNoOuterJoin();
-
VectorMapJoinDesc.HashTableImplementationType hashTableImplementationType = HashTableImplementationType.NONE;
VectorMapJoinDesc.HashTableKind hashTableKind = HashTableKind.NONE;
VectorMapJoinDesc.HashTableKeyType hashTableKeyType = HashTableKeyType.NONE;
@@ -1666,7 +1666,7 @@ public class Vectorizer implements PhysicalPlanResolver {
case MAPJOIN:
{
MapJoinDesc desc = (MapJoinDesc) op.getConf();
- boolean specialize = canSpecializeMapJoin(op, desc, isTez);
+ boolean specialize = canSpecializeMapJoin(op, desc, isTez || isSpark);
if (!specialize) {
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
index 39d1f18..46eab65 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.OpTraits;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.Statistics;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
/**
* SparkMapJoinOptimizer cloned from ConvertJoinMapJoin is an optimization that replaces a common join
@@ -89,6 +91,14 @@ public class SparkMapJoinOptimizer implements NodeProcessor {
LOG.info("Convert to non-bucketed map join");
MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversionPos);
+ // For native vectorized map join, we require the key SerDe to be BinarySortableSerDe
+ // Note: the MJ may not really get natively-vectorized later,
+ // but changing SerDe won't hurt correctness
+ if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED) &&
+ conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
+ mapJoinOp.getConf().getKeyTblDesc().getProperties().setProperty(
+ serdeConstants.SERIALIZATION_LIB, BinarySortableSerDe.class.getName());
+ }
if (conf.getBoolVar(HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) {
LOG.info("Check if it can be converted to bucketed map join");
numBuckets = convertJoinBucketMapJoin(joinOp, mapJoinOp,
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
new file mode 100644
index 0000000..d1b775f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
@@ -0,0 +1,853 @@
+PREHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_1a
+POSTHOOK: query: CREATE TABLE orc_table_1a(a INT) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_1a
+PREHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_2a
+POSTHOOK: query: CREATE TABLE orc_table_2a(c INT) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_2a
+PREHOOK: query: insert into table orc_table_1a values(1),(1), (2),(3)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@orc_table_1a
+POSTHOOK: query: insert into table orc_table_1a values(1),(1), (2),(3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@orc_table_1a
+POSTHOOK: Lineage: orc_table_1a.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@orc_table_2a
+POSTHOOK: query: insert into table orc_table_2a values(0),(2), (3),(null),(4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@orc_table_2a
+POSTHOOK: Lineage: orc_table_2a.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: explain
+select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col4
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col4 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1a
+PREHOOK: Input: default@orc_table_2a
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.a from orc_table_2a t2 join orc_table_1a t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1a
+POSTHOOK: Input: default@orc_table_2a
+#### A masked pattern was here ####
+3
+PREHOOK: query: explain
+select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: a is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: c (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Semi Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1a
+PREHOOK: Input: default@orc_table_2a
+#### A masked pattern was here ####
+POSTHOOK: query: select t2.c from orc_table_2a t2 left semi join orc_table_1a t1 on t1.a = t2.c where t2.c > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1a
+POSTHOOK: Input: default@orc_table_2a
+#### A masked pattern was here ####
+3
+PREHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_1b
+POSTHOOK: query: CREATE TABLE orc_table_1b(v1 STRING, a INT) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_1b
+PREHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_2b
+POSTHOOK: query: CREATE TABLE orc_table_2b(c INT, v2 STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_2b
+PREHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__3
+PREHOOK: Output: default@orc_table_1b
+POSTHOOK: query: insert into table orc_table_1b values("one", 1),("one", 1), ("two", 2),("three", 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__3
+POSTHOOK: Output: default@orc_table_1b
+POSTHOOK: Lineage: orc_table_1b.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: orc_table_1b.v1 SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, "<NULL>"),(4, "FOUR")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__4
+PREHOOK: Output: default@orc_table_2b
+POSTHOOK: query: insert into table orc_table_2b values(0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, "<NULL>"),(4, "FOUR")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__4
+POSTHOOK: Output: default@orc_table_2b
+POSTHOOK: Lineage: orc_table_2b.c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: orc_table_2b.v2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain
+select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col5 (type: string), _col6 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.v1, t1.a from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+three 3
+PREHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col5 (type: string), _col6 (type: int), _col0 (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+three 3 3 THREE
+PREHOOK: query: explain
+select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col5 (type: string), (_col6 * 2) (type: int), (_col0 * 5) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.v1, t1.a*2, t2.c*5, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+three 6 15 THREE
+PREHOOK: query: explain
+select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col0, _col1, _col5
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col5 (type: string), _col1 (type: string), _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+three THREE 3
+PREHOOK: query: explain
+select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 c (type: int)
+ 1 a (type: int)
+ outputColumnNames: _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col6 (type: int), _col5 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_2b t2 join orc_table_1b t1 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+3 three THREE
+PREHOOK: query: explain
+select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ outputColumnNames: _col0, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col6 (type: string), _col5 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.v1, t2.v2, t2.c from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+three THREE 3
+PREHOOK: query: explain
+select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 5 Data size: 456 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (c > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 4 Data size: 364 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (a > 2) (type: boolean)
+ Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ outputColumnNames: _col0, _col1, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: int), _col0 (type: string), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1b
+PREHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.a, t1.v1, t2.v2 from orc_table_1b t1 join orc_table_2b t2 on t1.a = t2.c where t1.a > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1b
+POSTHOOK: Input: default@orc_table_2b
+#### A masked pattern was here ####
+3 three THREE
http://git-wip-us.apache.org/repos/asf/hive/blob/80f548af/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
new file mode 100644
index 0000000..cc66db5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
@@ -0,0 +1,242 @@
+PREHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_1
+POSTHOOK: query: CREATE TABLE orc_table_1(v1 STRING, a INT) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_1
+PREHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_2
+POSTHOOK: query: CREATE TABLE orc_table_2(c INT, v2 STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_2
+PREHOOK: query: insert into table orc_table_1 values ("<null1>", null),("one", 1),("one", 1),("two", 2),("three", 3),("<null2>", null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@orc_table_1
+POSTHOOK: query: insert into table orc_table_1 values ("<null1>", null),("one", 1),("one", 1),("two", 2),("three", 3),("<null2>", null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@orc_table_1
+POSTHOOK: Lineage: orc_table_1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: orc_table_1.v1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, "<NULL1>"),(4, "FOUR"),(null, "<NULL2>")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@orc_table_2
+POSTHOOK: query: insert into table orc_table_2 values (0, "ZERO"),(2, "TWO"), (3, "THREE"),(null, "<NULL1>"),(4, "FOUR"),(null, "<NULL2>")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@orc_table_2
+POSTHOOK: Lineage: orc_table_2.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: orc_table_2.v2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from orc_table_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_table_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1
+#### A masked pattern was here ####
+<null1> NULL
+<null2> NULL
+one 1
+one 1
+three 3
+two 2
+PREHOOK: query: select * from orc_table_2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_table_2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+0 ZERO
+2 TWO
+3 THREE
+4 FOUR
+NULL <NULL1>
+NULL <NULL2>
+PREHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1
+PREHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1
+POSTHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+<null1> NULL NULL NULL
+<null2> NULL NULL NULL
+one 1 NULL NULL
+one 1 NULL NULL
+three 3 3 THREE
+two 2 2 TWO
+PREHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-1 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+ Spark HashTable Sink Operator
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 a (type: int)
+ 1 c (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: int), _col5 (type: int), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 598 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Local Work:
+ Map Reduce Local Work
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_table_1
+PREHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_table_1
+POSTHOOK: Input: default@orc_table_2
+#### A masked pattern was here ####
+NULL NULL 0 ZERO
+NULL NULL 4 FOUR
+NULL NULL NULL <NULL1>
+NULL NULL NULL <NULL2>
+three 3 3 THREE
+two 2 2 TWO
[10/23] hive git commit: HIVE-12091: Merge file doesn't work for ORC
table when running on Spark. [Spark Branch] (Rui reviewed by Xuefu)
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
new file mode 100644
index 0000000..b9b3960
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
@@ -0,0 +1,508 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc file merge tests for static partitions
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc file merge tests for static partitions
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
new file mode 100644
index 0000000..6c8bcfa
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
@@ -0,0 +1,619 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-2 depends on stages: Stage-0
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-5
+ Spark
+#### A masked pattern was here ####
+ Vertices:
+ Spark Merge File Work
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ st 80.0
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge8.q.out b/ql/src/test/results/clientpositive/spark/orc_merge8.q.out
new file mode 100644
index 0000000..3be3b07
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge8.q.out
@@ -0,0 +1,130 @@
+PREHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: create table alltypes_orc like alltypes
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: create table alltypes_orc like alltypes
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: alter table alltypes_orc set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes_orc
+POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct<c1:int,c2:string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+PREHOOK: query: insert into table alltypes_orc select * from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: insert into table alltypes_orc select * from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes_orc
+POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct<c1:int,c2:string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: alter table alltypes_orc concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+Found 1 items
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/13eb4095/ql/src/test/results/clientpositive/spark/orc_merge9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge9.q.out b/ql/src/test/results/clientpositive/spark/orc_merge9.q.out
new file mode 100644
index 0000000..bdf0fd3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/orc_merge9.q.out
@@ -0,0 +1,186 @@
+PREHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+PREHOOK: query: alter table ts_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@ts_merge
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: alter table ts_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@ts_merge
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: insert overwrite table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert overwrite table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@a_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: insert into table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert into table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+Found 2 items
+#### A masked pattern was here ####
[17/23] hive git commit: HIVE-12284: Merge master to Spark branch
10/28/2015 [Spark Branch] update some test result (Reviewed by Chao)
Posted by xu...@apache.org.
HIVE-12284: Merge master to Spark branch 10/28/2015 [Spark Branch] update some test result (Reviewed by Chao)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fd119291
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fd119291
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fd119291
Branch: refs/heads/master
Commit: fd119291482f5fa75a97dda0bf4282b6bd73a970
Parents: c9073aa
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Oct 28 13:53:20 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Oct 28 13:53:20 2015 -0700
----------------------------------------------------------------------
.../spark/vector_inner_join.q.out | 36 ++--
.../spark/vector_outer_join0.q.out | 8 +-
.../spark/vector_outer_join1.q.out | 56 +++---
.../spark/vector_outer_join2.q.out | 24 +--
.../spark/vector_outer_join3.q.out | 72 ++++----
.../spark/vector_outer_join4.q.out | 56 +++---
.../spark/vector_outer_join5.q.out | 176 +++++++++----------
7 files changed, 214 insertions(+), 214 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
index bf7090b..e63e1f1 100644
--- a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
@@ -60,9 +60,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -97,9 +97,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -155,9 +155,9 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -192,9 +192,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -277,9 +277,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -314,9 +314,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -363,9 +363,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -400,9 +400,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -449,9 +449,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -486,9 +486,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -535,9 +535,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -572,9 +572,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -621,9 +621,9 @@ STAGE PLANS:
keys:
0 c (type: int)
1 a (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -658,9 +658,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -707,9 +707,9 @@ STAGE PLANS:
keys:
0 a (type: int)
1 c (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -744,9 +744,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -793,9 +793,9 @@ STAGE PLANS:
keys:
0 a (type: int)
1 c (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -830,9 +830,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
index cc66db5..22c1b6a 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
@@ -87,9 +87,9 @@ STAGE PLANS:
keys:
0 a (type: int)
1 c (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -121,9 +121,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -176,9 +176,9 @@ STAGE PLANS:
keys:
0 a (type: int)
1 c (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -210,9 +210,9 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index cfc4753..25d4d31 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -182,18 +182,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col2 (type: int)
1 _col2 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -203,11 +203,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -217,17 +217,17 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
input vertices:
1 Map 2
- Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 16 Data size: 4632 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 16 Data size: 4632 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -298,18 +298,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -319,11 +319,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -333,17 +333,17 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 2
- Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 16 Data size: 4632 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 16 Data size: 4632 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -506,34 +506,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col1 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -545,11 +545,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 15 Data size: 3915 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 15 Data size: 4211 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -559,7 +559,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 3
- Statistics: Num rows: 16 Data size: 4306 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 16 Data size: 4632 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -569,7 +569,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 4
- Statistics: Num rows: 17 Data size: 4736 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 17 Data size: 5095 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(), sum(_col0)
mode: hash
@@ -579,10 +579,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0), sum(VALUE._col1)
@@ -596,7 +597,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index 38051fd..063fdde 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -198,34 +198,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cbigint (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col1 (type: bigint)
1 _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -237,11 +237,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int), cbigint (type: bigint)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 20 Data size: 5056 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5237 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -251,7 +251,7 @@ STAGE PLANS:
outputColumnNames: _col1
input vertices:
1 Map 3
- Statistics: Num rows: 22 Data size: 5561 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 22 Data size: 5760 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -261,7 +261,7 @@ STAGE PLANS:
outputColumnNames: _col1
input vertices:
1 Map 4
- Statistics: Num rows: 24 Data size: 6117 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 24 Data size: 6336 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(), sum(_col1)
mode: hash
@@ -271,10 +271,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0), sum(VALUE._col1)
@@ -288,7 +289,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
index b029e1c..b79c590 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -198,34 +198,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cstring1 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col1 (type: string)
1 _col0 (type: string)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -237,11 +237,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int), cstring1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -251,7 +251,7 @@ STAGE PLANS:
outputColumnNames: _col1
input vertices:
1 Map 3
- Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -260,7 +260,7 @@ STAGE PLANS:
1 _col0 (type: string)
input vertices:
1 Map 4
- Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -270,10 +270,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -287,7 +288,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -352,34 +352,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cstring2 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col1 (type: string)
1 _col0 (type: string)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cstring1 (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: string)
1 _col0 (type: string)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -391,11 +391,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cstring1 (type: string), cstring2 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -405,7 +405,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 3
- Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -414,7 +414,7 @@ STAGE PLANS:
1 _col0 (type: string)
input vertices:
1 Map 4
- Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -424,10 +424,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -441,7 +442,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -506,34 +506,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cbigint (type: bigint), cstring2 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col3 (type: string), _col1 (type: bigint)
1 _col1 (type: string), _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int), cstring1 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col2 (type: string), _col0 (type: int)
1 _col1 (type: string), _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -545,11 +545,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 20 Data size: 5040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 20 Data size: 5221 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -559,7 +559,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col2
input vertices:
1 Map 3
- Statistics: Num rows: 22 Data size: 5544 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 22 Data size: 5743 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -568,7 +568,7 @@ STAGE PLANS:
1 _col1 (type: string), _col0 (type: int)
input vertices:
1 Map 4
- Statistics: Num rows: 24 Data size: 6098 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 24 Data size: 6317 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -578,10 +578,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -595,7 +596,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
index 182dbb0..03db229 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -212,18 +212,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col2 (type: int)
1 _col2 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -233,11 +233,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -247,17 +247,17 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23
input vertices:
1 Map 2
- Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 33 Data size: 5054 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 33 Data size: 5054 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -363,18 +363,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -384,11 +384,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -398,17 +398,17 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 2
- Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 33 Data size: 5054 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 33 Data size: 5054 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -876,34 +876,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col1 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -915,11 +915,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: c
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 30 Data size: 4298 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 30 Data size: 4595 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -929,7 +929,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 3
- Statistics: Num rows: 33 Data size: 4727 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 33 Data size: 5054 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -938,7 +938,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 4
- Statistics: Num rows: 36 Data size: 5199 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 36 Data size: 5559 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -948,10 +948,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -965,7 +966,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
[20/23] hive git commit: HIVE-12330: Fix precommit Spark test part2
(Sergio Pena, reviewed by Szehon Ho)
Posted by xu...@apache.org.
HIVE-12330: Fix precommit Spark test part2 (Sergio Pena, reviewed by Szehon Ho)
Change-Id: Ia4eed857126e637b273a4aa3c3ab12c59146f035
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c771306b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c771306b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c771306b
Branch: refs/heads/master
Commit: c771306b469fdd215797e95c00fc12809b96009b
Parents: cad0ea6
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu Nov 12 16:54:54 2015 -0600
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu Nov 12 16:54:54 2015 -0600
----------------------------------------------------------------------
.../src/test/templates/TestHBaseCliDriver.vm | 63 +++++------------
.../templates/TestHBaseNegativeCliDriver.vm | 64 +++++------------
.../hadoop/hive/hbase/HBaseTestSetup.java | 9 +--
ql/src/test/templates/TestCliDriver.vm | 74 ++++++--------------
ql/src/test/templates/TestCompareCliDriver.vm | 71 ++++++-------------
ql/src/test/templates/TestNegativeCliDriver.vm | 70 +++++++-----------
ql/src/test/templates/TestParseNegative.vm | 65 ++++++-----------
.../ptest2/src/main/resources/batch-exec.vm | 2 -
8 files changed, 127 insertions(+), 291 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/templates/TestHBaseCliDriver.vm b/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
index de0be32..6f4a7c1 100644
--- a/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
+++ b/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
@@ -17,38 +17,25 @@
*/
package org.apache.hadoop.hive.cli;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-import java.io.*;
-import java.util.*;
-
-import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
import org.apache.hadoop.hive.hbase.HBaseTestSetup;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
-public class $className extends TestCase {
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class $className {
private static final String HIVE_ROOT = HBaseQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private HBaseQTestUtil qt;
- private HBaseTestSetup setup;
+ private static HBaseTestSetup setup = new HBaseTestSetup();
- public static class TestHBaseCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testCliDriver_"+tName, (HBaseTestSetup)setup));
- }
- }
-
- public $className(String name, HBaseTestSetup setup) {
- super(name);
- qt = null;
- this.setup = setup;
- }
-
- @Override
- protected void setUp() {
+ @Before
+ public void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -65,12 +52,11 @@ public class $className extends TestCase {
}
}
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -78,23 +64,9 @@ public class $className extends TestCase {
}
}
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
- }
- TestSuite suite = new TestSuite();
- HBaseTestSetup setup = new HBaseTestSetup(suite);
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, setup, new TestHBaseCliDriverAddTestFromQFiles());
- return setup;
+ @AfterClass
+ public static void closeHBaseConnections() throws Exception {
+ setup.tearDown();
}
#foreach ($qf in $qfiles)
@@ -102,6 +74,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm b/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
index b402585..043bd87 100644
--- a/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
+++ b/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
@@ -18,38 +18,25 @@
package org.apache.hadoop.hive.cli;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-import java.io.*;
-import java.util.*;
-
-import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
import org.apache.hadoop.hive.hbase.HBaseTestSetup;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
-public class $className extends TestCase {
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class $className {
private static final String HIVE_ROOT = HBaseQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private HBaseQTestUtil qt;
- private HBaseTestSetup setup;
-
- public static class TestHBaseNegativeCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testCliDriver_"+tName, (HBaseTestSetup)setup));
- }
- }
-
- public $className(String name, HBaseTestSetup setup) {
- super(name);
- qt = null;
- this.setup = setup;
- }
+ private static HBaseTestSetup setup = new HBaseTestSetup();
- @Override
- protected void setUp() {
+ @Before
+ public void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -66,12 +53,11 @@ public class $className extends TestCase {
}
}
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -79,24 +65,9 @@ public class $className extends TestCase {
}
}
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
- }
-
- TestSuite suite = new TestSuite();
- HBaseTestSetup setup = new HBaseTestSetup(suite);
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, setup, new TestHBaseNegativeCliDriverAddTestFromQFiles());
- return setup;
+ @AfterClass
+ public static void closeHBaseConnections() throws Exception {
+ setup.tearDown();
}
#foreach ($qf in $qfiles)
@@ -104,6 +75,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index 300f1cf..e6383dc 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -45,7 +45,7 @@ import org.apache.zookeeper.Watcher;
* HBaseTestSetup defines HBase-specific test fixtures which are
* reused across testcases.
*/
-public class HBaseTestSetup extends TestSetup {
+public class HBaseTestSetup {
private MiniHBaseCluster hbaseCluster;
private int zooKeeperPort;
@@ -54,10 +54,6 @@ public class HBaseTestSetup extends TestSetup {
private static final int NUM_REGIONSERVERS = 1;
- public HBaseTestSetup(Test test) {
- super(test);
- }
-
public HConnection getConnection() {
return this.hbaseConn;
}
@@ -170,8 +166,7 @@ public class HBaseTestSetup extends TestSetup {
return port;
}
- @Override
- protected void tearDown() throws Exception {
+ public void tearDown() throws Exception {
if (hbaseConn != null) {
hbaseConn.close();
hbaseConn = null;
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/ql/src/test/templates/TestCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm
index 01745da..fa638ae 100644
--- a/ql/src/test/templates/TestCliDriver.vm
+++ b/ql/src/test/templates/TestCliDriver.vm
@@ -17,28 +17,21 @@
*/
package org.apache.hadoop.hive.cli;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-import java.io.*;
-import java.util.*;
-
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-public class $className extends TestCase {
+public class $className {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
- public static class TestCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testCliDriver_"+tName));
- }
- }
-
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
@@ -66,12 +59,8 @@ public class $className extends TestCase {
}
}
- public $className(String name) {
- super(name);
- }
-
- @Override
- protected void setUp() {
+ @Before
+ public void setUp() {
try {
qt.clearTestSideEffects();
} catch (Exception e) {
@@ -82,21 +71,11 @@ public class $className extends TestCase {
}
}
- /**
- * Dummy last test. This is only meant to shutdown qt
- */
- public void testCliDriver_shutdown() {
- System.err.println ("Cleaning up " + "$className");
- }
-
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.clearPostTestEffects();
- if (getName().equals("testCliDriver_shutdown"))
- qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -104,24 +83,16 @@ public class $className extends TestCase {
}
}
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
+ @AfterClass
+ public static void shutdown() throws Exception {
+ try {
+ qt.shutdown();
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in shutdown");
}
-
- TestSuite suite = new TestSuite();
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, null, new TestCliDriverAddTestFromQFiles());
- suite.addTest(new $className("testCliDriver_shutdown"));
- return suite;
}
static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
@@ -132,6 +103,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/ql/src/test/templates/TestCompareCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCompareCliDriver.vm b/ql/src/test/templates/TestCompareCliDriver.vm
index 7f849e0..5e44315 100644
--- a/ql/src/test/templates/TestCompareCliDriver.vm
+++ b/ql/src/test/templates/TestCompareCliDriver.vm
@@ -17,29 +17,23 @@
*/
package org.apache.hadoop.hive.cli;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
import java.io.*;
import java.util.*;
-import java.util.Arrays;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.fail;
-public class $className extends TestCase {
+public class $className {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
- public static class TestCompareCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testCompareCliDriver_"+tName));
- }
- }
-
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
@@ -66,12 +60,8 @@ public class $className extends TestCase {
}
}
- public $className(String name) {
- super(name);
- }
-
- @Override
- protected void setUp() {
+ @Before
+ public void setUp() {
try {
qt.clearTestSideEffects();
} catch (Exception e) {
@@ -82,21 +72,11 @@ public class $className extends TestCase {
}
}
- /**
- * Dummy last test. This is only meant to shutdown qt
- */
- public void testCompareCliDriver_shutdown() {
- System.err.println ("Cleaning up " + "$className");
- }
-
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.clearPostTestEffects();
- if (getName().equals("testCompareCliDriver_shutdown"))
- qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -104,24 +84,16 @@ public class $className extends TestCase {
}
}
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
+ @AfterClass
+ public static void shutdown() throws Exception {
+ try {
+ qt.shutdown();
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in shutdown");
}
-
- TestSuite suite = new TestSuite();
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, null, new TestCompareCliDriverAddTestFromQFiles());
- suite.addTest(new $className("testCompareCliDriver_shutdown"));
- return suite;
}
private Map<String, List<String>> versionFiles = new HashMap<String, List<String>>();
@@ -134,6 +106,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testCompareCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/ql/src/test/templates/TestNegativeCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestNegativeCliDriver.vm b/ql/src/test/templates/TestNegativeCliDriver.vm
index 5f8ee8e..85c1e7f 100644
--- a/ql/src/test/templates/TestNegativeCliDriver.vm
+++ b/ql/src/test/templates/TestNegativeCliDriver.vm
@@ -17,28 +17,21 @@
*/
package org.apache.hadoop.hive.cli;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-import java.io.*;
-import java.util.*;
-
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.apache.hadoop.hive.ql.exec.Task;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-public class $className extends TestCase {
+public class $className {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
- public static class TestNegativeCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testNegativeCliDriver_"+tName));
- }
- }
-
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -59,30 +52,22 @@ public class $className extends TestCase {
}
}
- public $className(String name) {
- super(name);
- }
-
- @Override
- protected void setUp() {
+ @Before
+ public void setUp() {
try {
qt.clearTestSideEffects();
- }
- catch (Throwable e) {
+ } catch (Throwable e) {
e.printStackTrace();
System.err.flush();
fail("Unexpected exception in setup");
}
}
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.clearPostTestEffects();
- if (getName().equals("testNegativeCliDriver_shutdown"))
- qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -90,24 +75,16 @@ public class $className extends TestCase {
}
}
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
+ @AfterClass
+ public static void shutdown() throws Exception {
+ try {
+ qt.shutdown();
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in shutdown");
}
-
- TestSuite suite = new TestSuite();
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, null, new TestNegativeCliDriverAddTestFromQFiles());
- suite.addTest(new $className("testNegativeCliDriver_shutdown"));
- return suite;
}
/**
@@ -125,6 +102,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testNegativeCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/ql/src/test/templates/TestParseNegative.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestParseNegative.vm b/ql/src/test/templates/TestParseNegative.vm
index c5e7bdf..a4397f7 100755
--- a/ql/src/test/templates/TestParseNegative.vm
+++ b/ql/src/test/templates/TestParseNegative.vm
@@ -17,27 +17,23 @@
*/
package org.apache.hadoop.hive.ql.parse;
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
import java.io.*;
import java.util.*;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.ql.exec.Task;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-public class $className extends TestCase {
+public class $className {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
-
- public static class TestParseNegativeAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
- public void addTestToSuite(TestSuite suite, Object setup, String tName) {
- suite.addTest(new $className("testParseNegative_"+tName));
- }
- }
static {
@@ -57,18 +53,11 @@ public class $className extends TestCase {
}
}
- public $className(String name) {
- super(name);
- }
-
- @Override
- protected void tearDown() {
+ @After
+ public void tearDown() {
try {
qt.clearPostTestEffects();
- if (getName().equals("testParseNegative_shutdown"))
- qt.shutdown();
- }
- catch (Exception e) {
+ } catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -76,31 +65,16 @@ public class $className extends TestCase {
}
}
- /**
- * Dummy last test. This is only meant to shutdown qt
- */
- public void testParseNegative_shutdown() {
- System.err.println ("Cleaning up " + "$className");
- }
-
- public static Test suite() {
- Set<String> qFilesToExecute = new HashSet<String>();
- String qFiles = System.getProperty("qfile", "").trim();
- if(!qFiles.isEmpty()) {
- for(String qFile : qFiles.split(",")) {
- qFile = qFile.trim();
- if(!qFile.isEmpty()) {
- qFilesToExecute.add(qFile);
- }
- }
+ @AfterClass
+ public static void shutdown() throws Exception {
+ try {
+ qt.shutdown();
+ } catch (Exception e) {
+ System.err.println("Exception: " + e.getMessage());
+ e.printStackTrace();
+ System.err.flush();
+ fail("Unexpected exception in shutdown");
}
-
- TestSuite suite = new TestSuite();
-
- QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
- suite, null, new TestParseNegativeAddTestFromQFiles());
- suite.addTest(new $className("testParseNegative_shutdown"));
- return suite;
}
static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
@@ -111,6 +85,7 @@ public class $className extends TestCase {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
+ @Test
public void testParseNegative_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c771306b/testutils/ptest2/src/main/resources/batch-exec.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/batch-exec.vm b/testutils/ptest2/src/main/resources/batch-exec.vm
index c155851..da3e0ac 100644
--- a/testutils/ptest2/src/main/resources/batch-exec.vm
+++ b/testutils/ptest2/src/main/resources/batch-exec.vm
@@ -62,8 +62,6 @@ then
testModule=./
fi
pushd $testModule
- #clean to force regeneration of class files (maven sometimes skips generation)
- mvn clean -Dmaven.repo.local=$localDir/$instanceName/maven $mavenArgs
timeout 2h mvn -B test -Dmaven.repo.local=$localDir/$instanceName/maven \
$mavenArgs $mavenTestArgs $testArguments 1>$logDir/maven-test.txt 2>&1 </dev/null &
#[[
[06/23] hive git commit: HIVE-9139: Clean up
GenSparkProcContext.clonedReduceSinks and related code [Spark Branch] (Chao
Sun, reviewed by Xuefu Zhang)
Posted by xu...@apache.org.
HIVE-9139: Clean up GenSparkProcContext.clonedReduceSinks and related code [Spark Branch] (Chao Sun, reviewed by Xuefu Zhang)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a8c49ef4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a8c49ef4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a8c49ef4
Branch: refs/heads/master
Commit: a8c49ef419371cd827115f39a2a7b75c544e4eae
Parents: 7252481
Author: Chao Sun <su...@apache.org>
Authored: Mon Aug 17 10:40:53 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Mon Aug 17 10:40:53 2015 -0700
----------------------------------------------------------------------
.../hadoop/hive/ql/parse/spark/GenSparkProcContext.java | 2 --
.../org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java | 7 -------
2 files changed, 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/a8c49ef4/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java
index 0a0c791..62237e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java
@@ -129,7 +129,6 @@ public class GenSparkProcContext implements NodeProcessorCtx {
public final Map<Operator<?>, BaseWork> unionWorkMap;
public final List<UnionOperator> currentUnionOperators;
public final Set<BaseWork> workWithUnionOperators;
- public final Set<ReduceSinkOperator> clonedReduceSinks;
public final Set<FileSinkOperator> fileSinkSet;
public final Map<FileSinkOperator, List<FileSinkOperator>> fileSinkMap;
@@ -180,7 +179,6 @@ public class GenSparkProcContext implements NodeProcessorCtx {
this.unionWorkMap = new LinkedHashMap<Operator<?>, BaseWork>();
this.currentUnionOperators = new LinkedList<UnionOperator>();
this.workWithUnionOperators = new LinkedHashSet<BaseWork>();
- this.clonedReduceSinks = new LinkedHashSet<ReduceSinkOperator>();
this.fileSinkSet = new LinkedHashSet<FileSinkOperator>();
this.fileSinkMap = new LinkedHashMap<FileSinkOperator, List<FileSinkOperator>>();
this.pruningSinkSet = new LinkedHashSet<Operator<?>>();
http://git-wip-us.apache.org/repos/asf/hive/blob/a8c49ef4/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
index 3dd6d92..2d5bb62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkWork.java
@@ -94,12 +94,6 @@ public class GenSparkWork implements NodeProcessor {
LOG.debug("Root operator: " + root);
LOG.debug("Leaf operator: " + operator);
- if (context.clonedReduceSinks.contains(operator)) {
- // if we're visiting a terminal we've created ourselves,
- // just skip and keep going
- return null;
- }
-
SparkWork sparkWork = context.currentTask.getWork();
SMBMapJoinOperator smbOp = GenSparkUtils.getChildOperator(root, SMBMapJoinOperator.class);
@@ -192,7 +186,6 @@ public class GenSparkWork implements NodeProcessor {
// we've already set this one up. Need to clone for the next work.
r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(
(ReduceSinkDesc)r.getConf().clone(), r.getParentOperators());
- context.clonedReduceSinks.add(r);
}
r.getConf().setOutputName(work.getName());
}
[16/23] hive git commit: HIVE-12284: Merge master to Spark branch
10/28/2015 [Spark Branch] update some test result (Reviewed by Chao)
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fd119291/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index 2c7cd5b..2b13dc6 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -90,18 +90,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: st
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -113,11 +113,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -126,7 +126,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -136,10 +136,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -153,7 +154,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -208,11 +208,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {(_col1 = 2)}
@@ -220,9 +220,9 @@ STAGE PLANS:
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -234,11 +234,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -250,7 +250,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -260,10 +260,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -277,7 +278,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -332,11 +332,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {((UDFToInteger(_col0) pmod 4) = _col1)}
@@ -344,9 +344,9 @@ STAGE PLANS:
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -358,11 +358,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -374,7 +374,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -384,10 +384,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -401,7 +402,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -456,11 +456,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {(_col0 < 100)}
@@ -468,9 +468,9 @@ STAGE PLANS:
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -482,11 +482,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -498,7 +498,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -508,10 +508,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -525,7 +526,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -584,34 +584,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cbigint (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 UDFToLong(_col1) (type: bigint)
1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: tinyint)
1 _col0 (type: tinyint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -623,11 +623,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2018 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -637,7 +637,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 2219 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -646,7 +646,7 @@ STAGE PLANS:
1 _col0 (type: tinyint)
input vertices:
1 Map 4
- Statistics: Num rows: 7329 Data size: 2440 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 7329 Data size: 2451 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -656,10 +656,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -673,7 +674,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -790,18 +790,18 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: st
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -813,11 +813,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -826,7 +826,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 3072 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -836,10 +836,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -853,7 +854,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -908,11 +908,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {(_col1 = 2)}
@@ -920,9 +920,9 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -934,11 +934,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -950,7 +950,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 3072 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -960,10 +960,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -977,7 +978,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -1032,11 +1032,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {((_col0 pmod 4) = _col1)}
@@ -1044,9 +1044,9 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -1058,11 +1058,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -1074,7 +1074,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 3072 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -1084,10 +1084,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -1101,7 +1102,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -1156,11 +1156,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
filter predicates:
0 {(_col0 < 3)}
@@ -1168,9 +1168,9 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -1182,11 +1182,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -1198,7 +1198,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 3072 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -1208,10 +1208,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -1225,7 +1226,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
@@ -1284,34 +1284,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: sm
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cbigint (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 100 Data size: 352 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 UDFToLong(_col1) (type: bigint)
1 (_col0 pmod UDFToLong(8)) (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Map 4
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Spark HashTable Sink Operator
keys:
0 _col0 (type: int)
1 _col0 (type: int)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Stage: Stage-1
Spark
@@ -1323,11 +1323,11 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: s
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cmodtinyint (type: int), cmodint (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 6058 Data size: 2785 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6058 Data size: 2793 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -1337,7 +1337,7 @@ STAGE PLANS:
outputColumnNames: _col0
input vertices:
1 Map 3
- Statistics: Num rows: 6663 Data size: 3063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 6663 Data size: 3072 Basic stats: COMPLETE Column stats: NONE
Map Join Operator
condition map:
Left Outer Join0 to 1
@@ -1346,7 +1346,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 4
- Statistics: Num rows: 7329 Data size: 3369 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 7329 Data size: 3379 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -1356,10 +1356,11 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
Local Work:
Map Reduce Local Work
- Execution mode: vectorized
Reducer 2
+ Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
@@ -1373,7 +1374,6 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Execution mode: vectorized
Stage: Stage-0
Fetch Operator
[05/23] hive git commit: HIVE-11466: HIVE-10166 generates more data
on hive.log causing Jenkins to fill all the disk (Reviewed by Prasanth)
Posted by xu...@apache.org.
HIVE-11466: HIVE-10166 generates more data on hive.log causing Jenkins to fill all the disk (Reviewed by Prasanth)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/72524817
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/72524817
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/72524817
Branch: refs/heads/master
Commit: 725248174ee6fcd65e7f2cf0f5a4d176856cf081
Parents: 80f548a
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Fri Aug 7 17:38:07 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Fri Aug 7 17:38:44 2015 -0700
----------------------------------------------------------------------
.../org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/72524817/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
index ca1eae6..6c9efba 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
@@ -93,7 +93,6 @@ public class ThriftBinaryCLIService extends ThriftCLIService {
// TCP Server
server = new TThreadPoolServer(sargs);
server.setServerEventHandler(serverEventHandler);
- server.serve();
String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port "
+ portNum + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads";
LOG.info(msg);
[23/23] hive git commit: Revert "HIVE-12330: Fix precommit Spark test
part2 (Sergio Pena, reviewd by Szehon Ho)"
Posted by xu...@apache.org.
Revert "HIVE-12330: Fix precommit Spark test part2 (Sergio Pena, reviewd by Szehon Ho)"
This reverts commit c771306b469fdd215797e95c00fc12809b96009b.
Patch attached in HIVE-12434 doesn't include this commit.
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fb944ee4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fb944ee4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fb944ee4
Branch: refs/heads/master
Commit: fb944ee4963927ee363e597c9ee55868831503bc
Parents: de1b22f
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Wed Nov 18 13:44:21 2015 -0800
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Wed Nov 18 13:44:21 2015 -0800
----------------------------------------------------------------------
.../src/test/templates/TestHBaseCliDriver.vm | 63 ++++++++++++-----
.../templates/TestHBaseNegativeCliDriver.vm | 64 ++++++++++++-----
.../hadoop/hive/hbase/HBaseTestSetup.java | 9 ++-
ql/src/test/templates/TestCliDriver.vm | 74 ++++++++++++++------
ql/src/test/templates/TestCompareCliDriver.vm | 71 +++++++++++++------
ql/src/test/templates/TestNegativeCliDriver.vm | 70 +++++++++++-------
ql/src/test/templates/TestParseNegative.vm | 65 +++++++++++------
.../ptest2/src/main/resources/batch-exec.vm | 2 +
8 files changed, 291 insertions(+), 127 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/templates/TestHBaseCliDriver.vm b/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
index 6f4a7c1..de0be32 100644
--- a/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
+++ b/hbase-handler/src/test/templates/TestHBaseCliDriver.vm
@@ -17,25 +17,38 @@
*/
package org.apache.hadoop.hive.cli;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = HBaseQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private HBaseQTestUtil qt;
- private static HBaseTestSetup setup = new HBaseTestSetup();
+ private HBaseTestSetup setup;
- @Before
- public void setUp() {
+ public static class TestHBaseCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testCliDriver_"+tName, (HBaseTestSetup)setup));
+ }
+ }
+
+ public $className(String name, HBaseTestSetup setup) {
+ super(name);
+ qt = null;
+ this.setup = setup;
+ }
+
+ @Override
+ protected void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -52,11 +65,12 @@ public class $className {
}
}
- @After
- public void tearDown() {
+ @Override
+ protected void tearDown() {
try {
qt.shutdown();
- } catch (Exception e) {
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -64,9 +78,23 @@ public class $className {
}
}
- @AfterClass
- public static void closeHBaseConnections() throws Exception {
- setup.tearDown();
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
+ }
+ TestSuite suite = new TestSuite();
+ HBaseTestSetup setup = new HBaseTestSetup(suite);
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, setup, new TestHBaseCliDriverAddTestFromQFiles());
+ return setup;
}
#foreach ($qf in $qfiles)
@@ -74,7 +102,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm b/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
index 043bd87..b402585 100644
--- a/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
+++ b/hbase-handler/src/test/templates/TestHBaseNegativeCliDriver.vm
@@ -18,25 +18,38 @@
package org.apache.hadoop.hive.cli;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.hbase.HBaseQTestUtil;
import org.apache.hadoop.hive.hbase.HBaseTestSetup;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = HBaseQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private HBaseQTestUtil qt;
- private static HBaseTestSetup setup = new HBaseTestSetup();
+ private HBaseTestSetup setup;
+
+ public static class TestHBaseNegativeCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testCliDriver_"+tName, (HBaseTestSetup)setup));
+ }
+ }
+
+ public $className(String name, HBaseTestSetup setup) {
+ super(name);
+ qt = null;
+ this.setup = setup;
+ }
- @Before
- public void setUp() {
+ @Override
+ protected void setUp() {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -53,11 +66,12 @@ public class $className {
}
}
- @After
- public void tearDown() {
+ @Override
+ protected void tearDown() {
try {
qt.shutdown();
- } catch (Exception e) {
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -65,9 +79,24 @@ public class $className {
}
}
- @AfterClass
- public static void closeHBaseConnections() throws Exception {
- setup.tearDown();
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
+ }
+
+ TestSuite suite = new TestSuite();
+ HBaseTestSetup setup = new HBaseTestSetup(suite);
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, setup, new TestHBaseNegativeCliDriverAddTestFromQFiles());
+ return setup;
}
#foreach ($qf in $qfiles)
@@ -75,7 +104,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index e6383dc..300f1cf 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -45,7 +45,7 @@ import org.apache.zookeeper.Watcher;
* HBaseTestSetup defines HBase-specific test fixtures which are
* reused across testcases.
*/
-public class HBaseTestSetup {
+public class HBaseTestSetup extends TestSetup {
private MiniHBaseCluster hbaseCluster;
private int zooKeeperPort;
@@ -54,6 +54,10 @@ public class HBaseTestSetup {
private static final int NUM_REGIONSERVERS = 1;
+ public HBaseTestSetup(Test test) {
+ super(test);
+ }
+
public HConnection getConnection() {
return this.hbaseConn;
}
@@ -166,7 +170,8 @@ public class HBaseTestSetup {
return port;
}
- public void tearDown() throws Exception {
+ @Override
+ protected void tearDown() throws Exception {
if (hbaseConn != null) {
hbaseConn.close();
hbaseConn = null;
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/ql/src/test/templates/TestCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm
index fa638ae..01745da 100644
--- a/ql/src/test/templates/TestCliDriver.vm
+++ b/ql/src/test/templates/TestCliDriver.vm
@@ -17,21 +17,28 @@
*/
package org.apache.hadoop.hive.cli;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.hive.ql.session.SessionState;
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
+ public static class TestCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testCliDriver_"+tName));
+ }
+ }
+
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
@@ -59,8 +66,12 @@ public class $className {
}
}
- @Before
- public void setUp() {
+ public $className(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void setUp() {
try {
qt.clearTestSideEffects();
} catch (Exception e) {
@@ -71,11 +82,21 @@ public class $className {
}
}
- @After
- public void tearDown() {
+ /**
+ * Dummy last test. This is only meant to shutdown qt
+ */
+ public void testCliDriver_shutdown() {
+ System.err.println ("Cleaning up " + "$className");
+ }
+
+ @Override
+ protected void tearDown() {
try {
qt.clearPostTestEffects();
- } catch (Exception e) {
+ if (getName().equals("testCliDriver_shutdown"))
+ qt.shutdown();
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -83,16 +104,24 @@ public class $className {
}
}
- @AfterClass
- public static void shutdown() throws Exception {
- try {
- qt.shutdown();
- } catch (Exception e) {
- System.err.println("Exception: " + e.getMessage());
- e.printStackTrace();
- System.err.flush();
- fail("Unexpected exception in shutdown");
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
}
+
+ TestSuite suite = new TestSuite();
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, null, new TestCliDriverAddTestFromQFiles());
+ suite.addTest(new $className("testCliDriver_shutdown"));
+ return suite;
}
static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
@@ -103,7 +132,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/ql/src/test/templates/TestCompareCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCompareCliDriver.vm b/ql/src/test/templates/TestCompareCliDriver.vm
index 5e44315..7f849e0 100644
--- a/ql/src/test/templates/TestCompareCliDriver.vm
+++ b/ql/src/test/templates/TestCompareCliDriver.vm
@@ -17,23 +17,29 @@
*/
package org.apache.hadoop.hive.cli;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
import java.io.*;
import java.util.*;
+import java.util.Arrays;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.fail;
+import org.apache.hadoop.hive.ql.session.SessionState;
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
+ public static class TestCompareCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testCompareCliDriver_"+tName));
+ }
+ }
+
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
@@ -60,8 +66,12 @@ public class $className {
}
}
- @Before
- public void setUp() {
+ public $className(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void setUp() {
try {
qt.clearTestSideEffects();
} catch (Exception e) {
@@ -72,11 +82,21 @@ public class $className {
}
}
- @After
- public void tearDown() {
+ /**
+ * Dummy last test. This is only meant to shutdown qt
+ */
+ public void testCompareCliDriver_shutdown() {
+ System.err.println ("Cleaning up " + "$className");
+ }
+
+ @Override
+ protected void tearDown() {
try {
qt.clearPostTestEffects();
- } catch (Exception e) {
+ if (getName().equals("testCompareCliDriver_shutdown"))
+ qt.shutdown();
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -84,16 +104,24 @@ public class $className {
}
}
- @AfterClass
- public static void shutdown() throws Exception {
- try {
- qt.shutdown();
- } catch (Exception e) {
- System.err.println("Exception: " + e.getMessage());
- e.printStackTrace();
- System.err.flush();
- fail("Unexpected exception in shutdown");
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
}
+
+ TestSuite suite = new TestSuite();
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, null, new TestCompareCliDriverAddTestFromQFiles());
+ suite.addTest(new $className("testCompareCliDriver_shutdown"));
+ return suite;
}
private Map<String, List<String>> versionFiles = new HashMap<String, List<String>>();
@@ -106,7 +134,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testCompareCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/ql/src/test/templates/TestNegativeCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestNegativeCliDriver.vm b/ql/src/test/templates/TestNegativeCliDriver.vm
index 85c1e7f..5f8ee8e 100644
--- a/ql/src/test/templates/TestNegativeCliDriver.vm
+++ b/ql/src/test/templates/TestNegativeCliDriver.vm
@@ -17,21 +17,28 @@
*/
package org.apache.hadoop.hive.cli;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.hive.ql.exec.Task;
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
+ public static class TestNegativeCliDriverAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testNegativeCliDriver_"+tName));
+ }
+ }
+
static {
MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
String initScript = "$initScript";
@@ -52,22 +59,30 @@ public class $className {
}
}
- @Before
- public void setUp() {
+ public $className(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void setUp() {
try {
qt.clearTestSideEffects();
- } catch (Throwable e) {
+ }
+ catch (Throwable e) {
e.printStackTrace();
System.err.flush();
fail("Unexpected exception in setup");
}
}
- @After
- public void tearDown() {
+ @Override
+ protected void tearDown() {
try {
qt.clearPostTestEffects();
- } catch (Exception e) {
+ if (getName().equals("testNegativeCliDriver_shutdown"))
+ qt.shutdown();
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -75,16 +90,24 @@ public class $className {
}
}
- @AfterClass
- public static void shutdown() throws Exception {
- try {
- qt.shutdown();
- } catch (Exception e) {
- System.err.println("Exception: " + e.getMessage());
- e.printStackTrace();
- System.err.flush();
- fail("Unexpected exception in shutdown");
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
}
+
+ TestSuite suite = new TestSuite();
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, null, new TestNegativeCliDriverAddTestFromQFiles());
+ suite.addTest(new $className("testNegativeCliDriver_shutdown"));
+ return suite;
}
/**
@@ -102,7 +125,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testNegativeCliDriver_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/ql/src/test/templates/TestParseNegative.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestParseNegative.vm b/ql/src/test/templates/TestParseNegative.vm
index a4397f7..c5e7bdf 100755
--- a/ql/src/test/templates/TestParseNegative.vm
+++ b/ql/src/test/templates/TestParseNegative.vm
@@ -17,23 +17,27 @@
*/
package org.apache.hadoop.hive.ql.parse;
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
import java.io.*;
import java.util.*;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.ql.exec.Task;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-public class $className {
+public class $className extends TestCase {
private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
private static QTestUtil qt;
+
+ public static class TestParseNegativeAddTestFromQFiles implements QTestUtil.SuiteAddTestFunctor {
+ public void addTestToSuite(TestSuite suite, Object setup, String tName) {
+ suite.addTest(new $className("testParseNegative_"+tName));
+ }
+ }
static {
@@ -53,11 +57,18 @@ public class $className {
}
}
- @After
- public void tearDown() {
+ public $className(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void tearDown() {
try {
qt.clearPostTestEffects();
- } catch (Exception e) {
+ if (getName().equals("testParseNegative_shutdown"))
+ qt.shutdown();
+ }
+ catch (Exception e) {
System.err.println("Exception: " + e.getMessage());
e.printStackTrace();
System.err.flush();
@@ -65,16 +76,31 @@ public class $className {
}
}
- @AfterClass
- public static void shutdown() throws Exception {
- try {
- qt.shutdown();
- } catch (Exception e) {
- System.err.println("Exception: " + e.getMessage());
- e.printStackTrace();
- System.err.flush();
- fail("Unexpected exception in shutdown");
+ /**
+ * Dummy last test. This is only meant to shutdown qt
+ */
+ public void testParseNegative_shutdown() {
+ System.err.println ("Cleaning up " + "$className");
+ }
+
+ public static Test suite() {
+ Set<String> qFilesToExecute = new HashSet<String>();
+ String qFiles = System.getProperty("qfile", "").trim();
+ if(!qFiles.isEmpty()) {
+ for(String qFile : qFiles.split(",")) {
+ qFile = qFile.trim();
+ if(!qFile.isEmpty()) {
+ qFilesToExecute.add(qFile);
+ }
+ }
}
+
+ TestSuite suite = new TestSuite();
+
+ QTestUtil.addTestsToSuiteFromQfileNames("$qFileNamesFile", qFilesToExecute,
+ suite, null, new TestParseNegativeAddTestFromQFiles());
+ suite.addTest(new $className("testParseNegative_shutdown"));
+ return suite;
}
static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
@@ -85,7 +111,6 @@ public class $className {
#set ($eidx = $fname.indexOf('.'))
#set ($tname = $fname.substring(0, $eidx))
#set ($fpath = $qfilesMap.get($fname))
- @Test
public void testParseNegative_$tname() throws Exception {
runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/fb944ee4/testutils/ptest2/src/main/resources/batch-exec.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/batch-exec.vm b/testutils/ptest2/src/main/resources/batch-exec.vm
index da3e0ac..c155851 100644
--- a/testutils/ptest2/src/main/resources/batch-exec.vm
+++ b/testutils/ptest2/src/main/resources/batch-exec.vm
@@ -62,6 +62,8 @@ then
testModule=./
fi
pushd $testModule
+ #clean to force regeneration of class files (maven sometimes skips generation)
+ mvn clean -Dmaven.repo.local=$localDir/$instanceName/maven $mavenArgs
timeout 2h mvn -B test -Dmaven.repo.local=$localDir/$instanceName/maven \
$mavenArgs $mavenTestArgs $testArguments 1>$logDir/maven-test.txt 2>&1 </dev/null &
#[[
[14/23] hive git commit: HIVE-12284: Merge branch 'master' into spark
Posted by xu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --cc itests/src/test/resources/testconfiguration.properties
index 72dbcec,13efc58..e927955
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@@ -1167,20 -1227,7 +1227,18 @@@ miniSparkOnYarn.query.files=auto_sortme
load_fs2.q,\
load_hdfs_file_with_space_in_the_name.q,\
optrstat_groupby.q,\
+ orc_merge1.q,\
+ orc_merge2.q,\
+ orc_merge3.q,\
+ orc_merge4.q,\
+ orc_merge5.q,\
+ orc_merge6.q,\
+ orc_merge7.q,\
+ orc_merge8.q,\
+ orc_merge9.q,\
+ orc_merge_incompat1.q,\
+ orc_merge_incompat2.q,\
parallel_orderby.q,\
- ql_rewrite_gbtoidx.q,\
- ql_rewrite_gbtoidx_cbo_1.q,\
quotedid_smb.q,\
reduce_deduplicate.q,\
remote_script.q,\
@@@ -1194,15 -1240,14 +1251,21 @@@
stats_counter_partitioned.q,\
temp_table_external.q,\
truncate_column_buckets.q,\
- uber_reduce.q
+ uber_reduce.q,\
+ vector_inner_join.q,\
+ vector_outer_join0.q,\
+ vector_outer_join1.q,\
+ vector_outer_join2.q,\
+ vector_outer_join3.q,\
+ vector_outer_join4.q,\
+ vector_outer_join5.q
+ # These tests are removed from miniSparkOnYarn.query.files
+ # ql_rewrite_gbtoidx.q,\
+ # ql_rewrite_gbtoidx_cbo_1.q,\
+ # smb_mapjoin_8.q,\
+
+
spark.query.negative.files=groupby2_map_skew_multi_distinct.q,\
groupby2_multi_distinct.q,\
groupby3_map_skew_multi_distinct.q,\
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index 0cd4238,3b3303c..0d9c9a3
--- a/pom.xml
+++ b/pom.xml
@@@ -159,9 -158,9 +158,9 @@@
<stax.version>1.0.1</stax.version>
<slf4j.version>1.7.5</slf4j.version>
<ST4.version>4.0.4</ST4.version>
- <tez.version>0.5.2</tez.version>
+ <tez.version>0.8.1-alpha</tez.version>
<super-csv.version>2.2.0</super-csv.version>
- <spark.version>1.4.0</spark.version>
+ <spark.version>1.5.0</spark.version>
<scala.binary.version>2.10</scala.binary.version>
<scala.version>2.10.4</scala.version>
<tempus-fugit.version>1.1</tempus-fugit.version>
@@@ -222,9 -222,18 +222,8 @@@
<enabled>false</enabled>
</snapshots>
</repository>
- <repository>
- <id>spark-1.3</id>
- <url>https://s3-us-west-1.amazonaws.com/hive-spark/maven2/spark_2.10-1.3-rc1/</url>
- <releases>
- <enabled>true</enabled>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
</repositories>
- <!-- Hadoop dependency management is done at the bottom under profiles -->
<dependencyManagement>
<dependencies>
<!-- dependencies are always listed in sorted order by groupId, artifectId -->
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index d2c5245,085ad9e..2ab9c2d
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@@ -24,11 -23,8 +24,9 @@@ import java.util.List
import java.util.Map;
import java.util.Set;
- import com.google.common.base.Preconditions;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper;
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c9073aad/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------