Posted to commits@hive.apache.org by pr...@apache.org on 2017/05/02 02:21:50 UTC

[1/2] hive git commit: Revert "HIVE-11133: Support hive.explain.user for Spark (Sahil via Xuefu)"

Repository: hive
Updated Branches:
  refs/heads/master 5ab03cba5 -> 0b7e9105d


http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
deleted file mode 100644
index ca0910a..0000000
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ /dev/null
@@ -1,5921 +0,0 @@
-PREHOOK: query: explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
-PREHOOK: type: CREATETABLE
-POSTHOOK: query: explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
-POSTHOOK: type: CREATETABLE
-Stage-0
-  Create Table Operator:
-    name:default.src_orc_merge_test_part
-
-PREHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_orc_merge_test_part
-POSTHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_orc_merge_test_part
-PREHOOK: query: alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@src_orc_merge_test_part
-POSTHOOK: query: alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@src_orc_merge_test_part
-POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
-PREHOOK: query: desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_orc_merge_test_part
-POSTHOOK: query: desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_orc_merge_test_part
-key                 	int                 	                    
-value               	string              	                    
-ds                  	string              	                    
-ts                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-ts                  	string              	                    
-	 	 
-#### A masked pattern was here ####
-PREHOOK: query: explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src
-PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Stage-2
-  Stats-Aggr Operator
-    Stage-0
-      Move Operator
-        table:{"name:":"default.src_orc_merge_test_part"}
-        Stage-1
-          Map 1
-          File Output Operator [FS_3]
-            table:{"name:":"default.src_orc_merge_test_part"}
-            Select Operator [SEL_1] (rows=500 width=10)
-              Output:["_col0","_col1"]
-              TableScan [TS_0] (rows=500 width=10)
-                default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-
-PREHOOK: query: insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
-POSTHOOK: query: insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
-POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100
-PREHOOK: type: QUERY
-POSTHOOK: query: explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-
-Stage-2
-  Stats-Aggr Operator
-    Stage-0
-      Move Operator
-        table:{"name:":"default.src_orc_merge_test_part"}
-        Stage-1
-          Reducer 2
-          File Output Operator [FS_7]
-            table:{"name:":"default.src_orc_merge_test_part"}
-            Select Operator [SEL_6] (rows=100 width=10)
-              Output:["_col0","_col1"]
-              Limit [LIM_5] (rows=100 width=10)
-                Number of rows:100
-                Select Operator [SEL_4] (rows=100 width=10)
-                  Output:["_col0","_col1"]
-                <-Map 1 [GROUP]
-                  GROUP [RS_3]
-                    Limit [LIM_2] (rows=100 width=10)
-                      Number of rows:100
-                      Select Operator [SEL_1] (rows=500 width=10)
-                        Output:["_col0","_col1"]
-                        TableScan [TS_0] (rows=500 width=10)
-                          default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-
-PREHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Stage-0
-  Fetch Operator
-    limit:1
-
-PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_8]
-        Group By Operator [GBY_6] (rows=1 width=16)
-          Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-        <-Map 1 [GROUP]
-          GROUP [RS_5]
-            Group By Operator [GBY_4] (rows=1 width=16)
-              Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-              Select Operator [SEL_2] (rows=500 width=94)
-                Output:["_col0","_col1"]
-                TableScan [TS_0] (rows=500 width=94)
-                  default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-
-PREHOOK: query: alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate
-PREHOOK: type: ALTER_PARTITION_MERGE
-PREHOOK: Input: default@src_orc_merge_test_part
-PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
-POSTHOOK: query: alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate
-POSTHOOK: type: ALTER_PARTITION_MERGE
-POSTHOOK: Input: default@src_orc_merge_test_part
-POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
-PREHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_8]
-        Group By Operator [GBY_6] (rows=1 width=8)
-          Output:["_col0"],aggregations:["count(VALUE._col0)"]
-        <-Map 1 [GROUP]
-          GROUP [RS_5]
-            Group By Operator [GBY_4] (rows=1 width=8)
-              Output:["_col0"],aggregations:["count(1)"]
-              Select Operator [SEL_2] (rows=500 width=94)
-                TableScan [TS_0] (rows=500 width=94)
-                  default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE
-
-PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_8]
-        Group By Operator [GBY_6] (rows=1 width=16)
-          Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-        <-Map 1 [GROUP]
-          GROUP [RS_5]
-            Group By Operator [GBY_4] (rows=1 width=16)
-              Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-              Select Operator [SEL_2] (rows=500 width=94)
-                Output:["_col0","_col1"]
-                TableScan [TS_0] (rows=500 width=94)
-                  default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-
-PREHOOK: query: drop table src_orc_merge_test_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@src_orc_merge_test_part
-PREHOOK: Output: default@src_orc_merge_test_part
-POSTHOOK: query: drop table src_orc_merge_test_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@src_orc_merge_test_part
-POSTHOOK: Output: default@src_orc_merge_test_part
-Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2))
-from (
-select src1.key as k1, src1.value as v1,
-       src2.key as k2, src2.value as v2 FROM
-  (select * FROM src WHERE src.key < 10) src1
-    JOIN
-  (select * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-) a
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2))
-from (
-select src1.key as k1, src1.value as v1,
-       src2.key as k2, src2.value as v2 FROM
-  (select * FROM src WHERE src.key < 10) src1
-    JOIN
-  (select * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-) a
-POSTHOOK: type: QUERY
-Plan not optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_17]
-        Group By Operator [GBY_15] (rows=1 width=8)
-          Output:["_col0"],aggregations:["sum(VALUE._col0)"]
-        <-Reducer 2 [GROUP]
-          GROUP [RS_14]
-            Group By Operator [GBY_13] (rows=1 width=8)
-              Output:["_col0"],aggregations:["sum(hash(_col0,_col1,_col2,_col3))"]
-              Select Operator [SEL_11] (rows=27556 width=22)
-                Output:["_col0","_col1","_col2","_col3"]
-              <-Map 1 [PARTITION-LEVEL SORT]
-                PARTITION-LEVEL SORT [RS_10]
-                  Map Join Operator [MAPJOIN_20] (rows=27556 width=22)
-                    Conds:(Inner),Output:["_col0","_col1","_col2","_col3"]
-                  <-Select Operator [SEL_2] (rows=166 width=10)
-                      Output:["_col0","_col1"]
-                      Filter Operator [FIL_18] (rows=166 width=10)
-                        predicate:(key < 10)
-                        TableScan [TS_0] (rows=500 width=10)
-                          default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-                Map Reduce Local Work
-        Stage-2
-          Map 4
-          keys: [HASHTABLESINK_22]
-            Select Operator [SEL_5] (rows=166 width=10)
-              Output:["_col0","_col1"]
-              Filter Operator [FIL_19] (rows=166 width=10)
-                predicate:(key < 10)
-                TableScan [TS_3] (rows=500 width=10)
-                  default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
-          Map Reduce Local Work
-
-PREHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_7]
-        Select Operator [SEL_5] (rows=10 width=97)
-          Output:["_col0","_col1","_col2"]
-          Group By Operator [GBY_4] (rows=10 width=101)
-            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-          <-Map 1 [GROUP]
-            GROUP [RS_3]
-              PartitionCols:_col0, _col1, _col2
-              Group By Operator [GBY_2] (rows=10 width=101)
-                Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                Select Operator [SEL_1] (rows=20 width=88)
-                  Output:["key","c_int","c_float"]
-                  TableScan [TS_0] (rows=20 width=88)
-                    default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_12]
-        Select Operator [SEL_11] (rows=5 width=20)
-          Output:["_col0","_col1","_col2"]
-          Group By Operator [GBY_10] (rows=5 width=20)
-            Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-          <-Reducer 2 [GROUP]
-            GROUP [RS_9]
-              PartitionCols:_col0, _col1
-              Group By Operator [GBY_8] (rows=5 width=20)
-                Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col0, _col1
-                Select Operator [SEL_5] (rows=10 width=101)
-                  Output:["_col0","_col1"]
-                  Group By Operator [GBY_4] (rows=10 width=101)
-                    Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                  <-Map 1 [GROUP]
-                    GROUP [RS_3]
-                      PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_2] (rows=10 width=101)
-                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                        Select Operator [SEL_1] (rows=20 width=88)
-                          Output:["key","c_int","c_float"]
-                          TableScan [TS_0] (rows=20 width=88)
-                            default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 10 <- Reducer 9 (SORT)
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Reducer 10 (PARTITION-LEVEL SORT), Reducer 7 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 4 <- Reducer 3 (SORT)
-Reducer 6 <- Map 5 (GROUP)
-Reducer 7 <- Reducer 6 (SORT)
-Reducer 9 <- Map 8 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_38]
-        Select Operator [SEL_37] (rows=1 width=20)
-          Output:["_col0","_col1","_col2"]
-        <-Reducer 3 [SORT]
-          SORT [RS_36]
-            Select Operator [SEL_35] (rows=1 width=20)
-              Output:["_col0","_col1","_col2"]
-              Group By Operator [GBY_34] (rows=1 width=20)
-                Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-              <-Reducer 2 [GROUP]
-                GROUP [RS_33]
-                  PartitionCols:_col0, _col1
-                  Group By Operator [GBY_32] (rows=1 width=20)
-                    Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col4, _col1
-                    Select Operator [SEL_31] (rows=1 width=20)
-                      Output:["_col1","_col4"]
-                      Filter Operator [FIL_29] (rows=1 width=20)
-                        predicate:(((_col3 + _col6) >= 0) and ((_col3 > 0) or (_col1 >= 0)))
-                        Join Operator [JOIN_28] (rows=3 width=20)
-                          Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                        <-Map 1 [PARTITION-LEVEL SORT]
-                          PARTITION-LEVEL SORT [RS_25]
-                            PartitionCols:_col0
-                            Select Operator [SEL_2] (rows=18 width=84)
-                              Output:["_col0","_col1"]
-                              Filter Operator [FIL_39] (rows=18 width=84)
-                                predicate:key is not null
-                                TableScan [TS_0] (rows=20 width=84)
-                                  default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                        <-Reducer 10 [PARTITION-LEVEL SORT]
-                          PARTITION-LEVEL SORT [RS_27]
-                            PartitionCols:_col0
-                            Select Operator [SEL_23] (rows=1 width=89)
-                              Output:["_col0","_col1"]
-                            <-Reducer 9 [SORT]
-                              SORT [RS_22]
-                                Select Operator [SEL_20] (rows=1 width=105)
-                                  Output:["_col0","_col1","_col2","_col3"]
-                                  Group By Operator [GBY_19] (rows=1 width=101)
-                                    Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                  <-Map 8 [GROUP]
-                                    GROUP [RS_18]
-                                      PartitionCols:_col0, _col1, _col2
-                                      Group By Operator [GBY_17] (rows=2 width=101)
-                                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                        Filter Operator [FIL_41] (rows=5 width=93)
-                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                                          TableScan [TS_14] (rows=20 width=88)
-                                            default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                        <-Reducer 7 [PARTITION-LEVEL SORT]
-                          PARTITION-LEVEL SORT [RS_26]
-                            PartitionCols:_col0
-                            Select Operator [SEL_12] (rows=1 width=97)
-                              Output:["_col0","_col1","_col2"]
-                            <-Reducer 6 [SORT]
-                              SORT [RS_11]
-                                Select Operator [SEL_9] (rows=1 width=97)
-                                  Output:["_col0","_col1","_col2"]
-                                  Group By Operator [GBY_8] (rows=1 width=101)
-                                    Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                  <-Map 5 [GROUP]
-                                    GROUP [RS_7]
-                                      PartitionCols:_col0, _col1, _col2
-                                      Group By Operator [GBY_6] (rows=2 width=101)
-                                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                        Filter Operator [FIL_40] (rows=5 width=93)
-                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                                          TableScan [TS_3] (rows=20 width=88)
-                                            default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int desc
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int desc
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Reducer 7 (PARTITION-LEVEL SORT), Reducer 9 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 4 <- Reducer 3 (SORT)
-Reducer 6 <- Map 5 (GROUP)
-Reducer 7 <- Reducer 6 (SORT)
-Reducer 9 <- Map 8 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_37]
-        Select Operator [SEL_36] (rows=1 width=20)
-          Output:["_col0","_col1","_col2"]
-        <-Reducer 3 [SORT]
-          SORT [RS_35]
-            Group By Operator [GBY_33] (rows=1 width=20)
-              Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-            <-Reducer 2 [GROUP]
-              GROUP [RS_32]
-                PartitionCols:_col0, _col1
-                Group By Operator [GBY_31] (rows=1 width=20)
-                  Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col4
-                  Select Operator [SEL_30] (rows=1 width=20)
-                    Output:["_col1","_col4"]
-                    Filter Operator [FIL_26] (rows=1 width=20)
-                      predicate:(((_col3 + _col6) >= 0) and ((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1)) and ((_col3 > 0) or (_col1 >= 0)))
-                      Join Operator [JOIN_25] (rows=3 width=20)
-                        Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                      <-Map 1 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_22]
-                          PartitionCols:_col0
-                          Select Operator [SEL_2] (rows=18 width=84)
-                            Output:["_col0","_col1"]
-                            Filter Operator [FIL_38] (rows=18 width=84)
-                              predicate:((c_int > 0) and key is not null)
-                              TableScan [TS_0] (rows=20 width=84)
-                                default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                      <-Reducer 7 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_23]
-                          PartitionCols:_col0
-                          Select Operator [SEL_12] (rows=1 width=97)
-                            Output:["_col0","_col1","_col2"]
-                          <-Reducer 6 [SORT]
-                            SORT [RS_11]
-                              Select Operator [SEL_9] (rows=1 width=105)
-                                Output:["_col0","_col1","_col2","_col3"]
-                                Group By Operator [GBY_8] (rows=1 width=101)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                <-Map 5 [GROUP]
-                                  GROUP [RS_7]
-                                    PartitionCols:_col0, _col1, _col2
-                                    Group By Operator [GBY_6] (rows=1 width=101)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                      Filter Operator [FIL_39] (rows=2 width=93)
-                                        predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                        TableScan [TS_3] (rows=20 width=88)
-                                          default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                      <-Reducer 9 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_24]
-                          PartitionCols:_col0
-                          Select Operator [SEL_20] (rows=1 width=89)
-                            Output:["_col0","_col1"]
-                            Group By Operator [GBY_19] (rows=1 width=93)
-                              Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
-                            <-Map 8 [GROUP]
-                              GROUP [RS_18]
-                                PartitionCols:_col0, _col1, _col2
-                                Group By Operator [GBY_17] (rows=1 width=93)
-                                  Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
-                                  Filter Operator [FIL_40] (rows=2 width=93)
-                                    predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                    TableScan [TS_14] (rows=20 width=88)
-                                      default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Reducer 6 (PARTITION-LEVEL SORT), Reducer 8 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 5 <- Map 4 (GROUP)
-Reducer 6 <- Reducer 5 (SORT)
-Reducer 8 <- Map 7 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_33]
-        Group By Operator [GBY_31] (rows=1 width=20)
-          Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-        <-Reducer 2 [GROUP]
-          GROUP [RS_30]
-            PartitionCols:_col0, _col1
-            Group By Operator [GBY_29] (rows=1 width=20)
-              Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col4
-              Select Operator [SEL_28] (rows=1 width=20)
-                Output:["_col1","_col4"]
-                Filter Operator [FIL_26] (rows=1 width=20)
-                  predicate:(((_col3 + _col6) >= 2) and ((_col3 > 0) or (_col1 >= 0)))
-                  Join Operator [JOIN_25] (rows=3 width=20)
-                    Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                  <-Map 1 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_22]
-                      PartitionCols:_col0
-                      Select Operator [SEL_2] (rows=18 width=84)
-                        Output:["_col0","_col1"]
-                        Filter Operator [FIL_34] (rows=18 width=84)
-                          predicate:key is not null
-                          TableScan [TS_0] (rows=20 width=84)
-                            default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                  <-Reducer 6 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_23]
-                      PartitionCols:_col0
-                      Select Operator [SEL_12] (rows=1 width=97)
-                        Output:["_col0","_col1","_col2"]
-                      <-Reducer 5 [SORT]
-                        SORT [RS_11]
-                          Select Operator [SEL_9] (rows=1 width=105)
-                            Output:["_col0","_col1","_col2","_col3"]
-                            Group By Operator [GBY_8] (rows=1 width=101)
-                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                            <-Map 4 [GROUP]
-                              GROUP [RS_7]
-                                PartitionCols:_col0, _col1, _col2
-                                Group By Operator [GBY_6] (rows=1 width=101)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                  Filter Operator [FIL_35] (rows=2 width=93)
-                                    predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                    TableScan [TS_3] (rows=20 width=88)
-                                      default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                  <-Reducer 8 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_24]
-                      PartitionCols:_col0
-                      Select Operator [SEL_20] (rows=1 width=89)
-                        Output:["_col0","_col1"]
-                        Group By Operator [GBY_19] (rows=1 width=93)
-                          Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
-                        <-Map 7 [GROUP]
-                          GROUP [RS_18]
-                            PartitionCols:_col0, _col1, _col2
-                            Group By Operator [GBY_17] (rows=1 width=93)
-                              Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
-                              Filter Operator [FIL_36] (rows=2 width=93)
-                                predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                TableScan [TS_14] (rows=20 width=88)
-                                  default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 10 <- Reducer 9 (SORT)
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Reducer 10 (PARTITION-LEVEL SORT), Reducer 7 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 4 <- Reducer 3 (SORT)
-Reducer 6 <- Map 5 (GROUP)
-Reducer 7 <- Reducer 6 (SORT)
-Reducer 9 <- Map 8 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_40]
-        Select Operator [SEL_39] (rows=1 width=20)
-          Output:["_col0","_col1","_col2"]
-        <-Reducer 3 [SORT]
-          SORT [RS_38]
-            Group By Operator [GBY_36] (rows=1 width=20)
-              Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-            <-Reducer 2 [GROUP]
-              GROUP [RS_35]
-                PartitionCols:_col0, _col1
-                Group By Operator [GBY_34] (rows=1 width=20)
-                  Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col4
-                  Select Operator [SEL_33] (rows=1 width=20)
-                    Output:["_col1","_col4"]
-                    Filter Operator [FIL_29] (rows=1 width=20)
-                      predicate:(((_col3 + _col6) >= 0) and ((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1)) and ((_col3 > 0) or (_col1 >= 0)))
-                      Join Operator [JOIN_28] (rows=3 width=20)
-                        Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                      <-Map 1 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_25]
-                          PartitionCols:_col0
-                          Select Operator [SEL_2] (rows=18 width=84)
-                            Output:["_col0","_col1"]
-                            Filter Operator [FIL_41] (rows=18 width=84)
-                              predicate:((c_int > 0) and key is not null)
-                              TableScan [TS_0] (rows=20 width=84)
-                                default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                      <-Reducer 10 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_27]
-                          PartitionCols:_col0
-                          Select Operator [SEL_23] (rows=1 width=89)
-                            Output:["_col0","_col1"]
-                          <-Reducer 9 [SORT]
-                            SORT [RS_22]
-                              Select Operator [SEL_20] (rows=1 width=105)
-                                Output:["_col0","_col1","_col2","_col3"]
-                                Group By Operator [GBY_19] (rows=1 width=101)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                <-Map 8 [GROUP]
-                                  GROUP [RS_18]
-                                    PartitionCols:_col0, _col1, _col2
-                                    Group By Operator [GBY_17] (rows=1 width=101)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                      Filter Operator [FIL_43] (rows=2 width=93)
-                                        predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                        TableScan [TS_14] (rows=20 width=88)
-                                          default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                      <-Reducer 7 [PARTITION-LEVEL SORT]
-                        PARTITION-LEVEL SORT [RS_26]
-                          PartitionCols:_col0
-                          Select Operator [SEL_12] (rows=1 width=97)
-                            Output:["_col0","_col1","_col2"]
-                          <-Reducer 6 [SORT]
-                            SORT [RS_11]
-                              Select Operator [SEL_9] (rows=1 width=105)
-                                Output:["_col0","_col1","_col2","_col3"]
-                                Group By Operator [GBY_8] (rows=1 width=101)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                <-Map 5 [GROUP]
-                                  GROUP [RS_7]
-                                    PartitionCols:_col0, _col1, _col2
-                                    Group By Operator [GBY_6] (rows=1 width=101)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                      Filter Operator [FIL_42] (rows=2 width=93)
-                                        predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                        TableScan [TS_3] (rows=20 width=88)
-                                          default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Reducer 5 (PARTITION-LEVEL SORT), Reducer 7 (PARTITION-LEVEL SORT)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 5 <- Map 4 (GROUP)
-Reducer 7 <- Map 6 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_30]
-        Group By Operator [GBY_28] (rows=1 width=20)
-          Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-        <-Reducer 2 [GROUP]
-          GROUP [RS_27]
-            PartitionCols:_col0, _col1
-            Group By Operator [GBY_26] (rows=1 width=20)
-              Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col4
-              Select Operator [SEL_25] (rows=1 width=20)
-                Output:["_col1","_col4"]
-                Filter Operator [FIL_23] (rows=1 width=20)
-                  predicate:(((_col3 + _col6) >= 0) and ((_col3 > 0) or (_col1 >= 0)))
-                  Join Operator [JOIN_22] (rows=3 width=20)
-                    Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                  <-Map 1 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_19]
-                      PartitionCols:_col0
-                      Select Operator [SEL_2] (rows=18 width=84)
-                        Output:["_col0","_col1"]
-                        Filter Operator [FIL_31] (rows=18 width=84)
-                          predicate:key is not null
-                          TableScan [TS_0] (rows=20 width=84)
-                            default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                  <-Reducer 5 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_20]
-                      PartitionCols:_col0
-                      Select Operator [SEL_9] (rows=1 width=97)
-                        Output:["_col0","_col1","_col2"]
-                        Group By Operator [GBY_8] (rows=1 width=101)
-                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                        <-Map 4 [GROUP]
-                          GROUP [RS_7]
-                            PartitionCols:_col0, _col1, _col2
-                            Group By Operator [GBY_6] (rows=1 width=101)
-                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                              Filter Operator [FIL_32] (rows=2 width=93)
-                                predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                TableScan [TS_3] (rows=20 width=88)
-                                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                  <-Reducer 7 [PARTITION-LEVEL SORT]
-                    PARTITION-LEVEL SORT [RS_21]
-                      PartitionCols:_col0
-                      Select Operator [SEL_17] (rows=1 width=89)
-                        Output:["_col0","_col1"]
-                        Group By Operator [GBY_16] (rows=1 width=93)
-                          Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
-                        <-Map 6 [GROUP]
-                          GROUP [RS_15]
-                            PartitionCols:_col0, _col1, _col2
-                            Group By Operator [GBY_14] (rows=1 width=93)
-                              Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
-                              Filter Operator [FIL_33] (rows=2 width=93)
-                                predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and (c_float > 0) and ((c_int >= 1) or (c_float >= 1)) and ((UDFToFloat(c_int) + c_float) >= 0) and key is not null)
-                                TableScan [TS_11] (rows=20 width=88)
-                                  default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Stage-0
-  Fetch Operator
-    limit:1
-
-PREHOOK: query: explain select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-	UNION  ALL
-	select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-	UNION  ALL
-	select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (SORT), Reducer 5 (SORT), Reducer 7 (SORT)
-Reducer 5 <- Map 4 (GROUP)
-Reducer 7 <- Map 6 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_26]
-        Select Operator [SEL_25] (rows=3 width=87)
-          Output:["_col0"]
-        <-Reducer 2 [SORT]
-          SORT [RS_24]
-            Select Operator [SEL_5] (rows=1 width=87)
-              Output:["_col0"]
-              Group By Operator [GBY_4] (rows=1 width=8)
-                Output:["_col0"],aggregations:["count(VALUE._col0)"]
-              <-Map 1 [GROUP]
-                GROUP [RS_3]
-                  Group By Operator [GBY_2] (rows=1 width=8)
-                    Output:["_col0"],aggregations:["count(key)"]
-                    Select Operator [SEL_1] (rows=20 width=80)
-                      Output:["key"]
-                      TableScan [TS_0] (rows=20 width=80)
-                        default@cbo_t3,s1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-        <-Reducer 5 [SORT]
-          SORT [RS_24]
-            Select Operator [SEL_12] (rows=1 width=87)
-              Output:["_col0"]
-              Group By Operator [GBY_11] (rows=1 width=8)
-                Output:["_col0"],aggregations:["count(VALUE._col0)"]
-              <-Map 4 [GROUP]
-                GROUP [RS_10]
-                  Group By Operator [GBY_9] (rows=1 width=8)
-                    Output:["_col0"],aggregations:["count(key)"]
-                    Select Operator [SEL_8] (rows=20 width=80)
-                      Output:["key"]
-                      TableScan [TS_7] (rows=20 width=80)
-                        default@cbo_t3,s2,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-        <-Reducer 7 [SORT]
-          SORT [RS_24]
-            Select Operator [SEL_21] (rows=1 width=87)
-              Output:["_col0"]
-              Group By Operator [GBY_20] (rows=1 width=8)
-                Output:["_col0"],aggregations:["count(VALUE._col0)"]
-              <-Map 6 [GROUP]
-                GROUP [RS_19]
-                  Group By Operator [GBY_18] (rows=1 width=8)
-                    Output:["_col0"],aggregations:["count(key)"]
-                    Select Operator [SEL_17] (rows=20 width=80)
-                      Output:["key"]
-                      TableScan [TS_16] (rows=20 width=80)
-                        default@cbo_t3,s3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-
-PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-    UNION  ALL
-        select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-    UNION  ALL
-        select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (GROUP), Reducer 6 (GROUP), Reducer 8 (GROUP)
-Reducer 4 <- Reducer 3 (SORT)
-Reducer 6 <- Map 5 (GROUP)
-Reducer 8 <- Map 7 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_31]
-        Select Operator [SEL_30] (rows=1 width=95)
-          Output:["_col0","_col1"]
-        <-Reducer 3 [SORT]
-          SORT [RS_29]
-            Group By Operator [GBY_27] (rows=1 width=95)
-              Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
-            <-Reducer 2 [GROUP]
-              GROUP [RS_26]
-                PartitionCols:_col0
-                Group By Operator [GBY_25] (rows=1 width=95)
-                  Output:["_col0","_col1"],aggregations:["count(1)"],keys:_col0
-                  Select Operator [SEL_5] (rows=1 width=87)
-                    Output:["_col0"]
-                    Group By Operator [GBY_4] (rows=1 width=8)
-                      Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                    <-Map 1 [GROUP]
-                      GROUP [RS_3]
-                        Group By Operator [GBY_2] (rows=1 width=8)
-                          Output:["_col0"],aggregations:["count(key)"]
-                          Select Operator [SEL_1] (rows=20 width=80)
-                            Output:["key"]
-                            TableScan [TS_0] (rows=20 width=80)
-                              default@cbo_t3,s1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-            <-Reducer 6 [GROUP]
-              GROUP [RS_26]
-                PartitionCols:_col0
-                Group By Operator [GBY_25] (rows=1 width=95)
-                  Output:["_col0","_col1"],aggregations:["count(1)"],keys:_col0
-                  Select Operator [SEL_12] (rows=1 width=87)
-                    Output:["_col0"]
-                    Group By Operator [GBY_11] (rows=1 width=8)
-                      Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                    <-Map 5 [GROUP]
-                      GROUP [RS_10]
-                        Group By Operator [GBY_9] (rows=1 width=8)
-                          Output:["_col0"],aggregations:["count(key)"]
-                          Select Operator [SEL_8] (rows=20 width=80)
-                            Output:["key"]
-                            TableScan [TS_7] (rows=20 width=80)
-                              default@cbo_t3,s2,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-            <-Reducer 8 [GROUP]
-              GROUP [RS_26]
-                PartitionCols:_col0
-                Group By Operator [GBY_25] (rows=1 width=95)
-                  Output:["_col0","_col1"],aggregations:["count(1)"],keys:_col0
-                  Select Operator [SEL_21] (rows=1 width=87)
-                    Output:["_col0"]
-                    Group By Operator [GBY_20] (rows=1 width=8)
-                      Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                    <-Map 7 [GROUP]
-                      GROUP [RS_19]
-                        Group By Operator [GBY_18] (rows=1 width=8)
-                          Output:["_col0"],aggregations:["count(key)"]
-                          Select Operator [SEL_17] (rows=20 width=80)
-                            Output:["key"]
-                            TableScan [TS_16] (rows=20 width=80)
-                              default@cbo_t3,s3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-
-PREHOOK: query: explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_10]
-        Join Operator [JOIN_8] (rows=18 width=85)
-          Output:["_col0"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0","1":"_col0"}
-        <-Map 1 [PARTITION-LEVEL SORT]
-          PARTITION-LEVEL SORT [RS_6]
-            PartitionCols:_col0
-            Select Operator [SEL_2] (rows=6 width=85)
-              Output:["_col0"]
-              Filter Operator [FIL_11] (rows=6 width=85)
-                predicate:(UDFToDouble(key) >= 1.0)
-                TableScan [TS_0] (rows=20 width=80)
-                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-        <-Map 3 [PARTITION-LEVEL SORT]
-          PARTITION-LEVEL SORT [RS_7]
-            PartitionCols:_col0
-            Select Operator [SEL_5] (rows=6 width=85)
-              Output:["_col0"]
-              Filter Operator [FIL_12] (rows=6 width=85)
-                predicate:(UDFToDouble(key) >= 1.0)
-                TableScan [TS_3] (rows=20 width=80)
-                  default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-
-PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_8]
-        Select Operator [SEL_7] (rows=100 width=8)
-          Output:["_col0","_col1"]
-          Join Operator [JOIN_6] (rows=100 width=8)
-            Output:["_col1","_col3"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0","1":"_col0"}
-          <-Map 1 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_4]
-              PartitionCols:_col0
-              Select Operator [SEL_1] (rows=20 width=84)
-                Output:["_col0","_col1"]
-                TableScan [TS_0] (rows=20 width=84)
-                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-          <-Map 3 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_5]
-              PartitionCols:_col0
-              Select Operator [SEL_3] (rows=20 width=84)
-                Output:["_col0","_col1"]
-                TableScan [TS_2] (rows=20 width=84)
-                  default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join  cbo_t2 on cbo_t1.key=cbo_t2.key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_8]
-        Select Operator [SEL_7] (rows=100 width=8)
-          Output:["_col0","_col1"]
-          Join Operator [JOIN_6] (rows=100 width=8)
-            Output:["_col1","_col3"],condition map:[{"":"{\"type\":\"Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0","1":"_col0"}
-          <-Map 1 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_4]
-              PartitionCols:_col0
-              Select Operator [SEL_1] (rows=20 width=84)
-                Output:["_col0","_col1"]
-                TableScan [TS_0] (rows=20 width=84)
-                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-          <-Map 3 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_5]
-              PartitionCols:_col0
-              Select Operator [SEL_3] (rows=20 width=84)
-                Output:["_col0","_col1"]
-                TableScan [TS_2] (rows=20 width=84)
-                  default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT), Map 4 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_14]
-        Select Operator [SEL_13] (rows=291 width=101)
-          Output:["_col0","_col1","_col2","_col3","_col4"]
-          Join Operator [JOIN_12] (rows=291 width=101)
-            Output:["_col1","_col2","_col4","_col5","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":0,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-          <-Map 1 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_9]
-              PartitionCols:_col0
-              Select Operator [SEL_2] (rows=18 width=87)
-                Output:["_col0","_col1","_col2"]
-                Filter Operator [FIL_15] (rows=18 width=87)
-                  predicate:key is not null
-                  TableScan [TS_0] (rows=20 width=88)
-                    default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-          <-Map 3 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_10]
-              PartitionCols:_col0
-              Select Operator [SEL_5] (rows=18 width=84)
-                Output:["_col0","_col1"]
-                Filter Operator [FIL_16] (rows=18 width=84)
-                  predicate:key is not null
-                  TableScan [TS_3] (rows=20 width=84)
-                    default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-          <-Map 4 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_11]
-              PartitionCols:_col0
-              Select Operator [SEL_8] (rows=18 width=84)
-                Output:["_col0","_col1"]
-                Filter Operator [FIL_17] (rows=18 width=84)
-                  predicate:key is not null
-                  TableScan [TS_6] (rows=20 width=84)
-                    default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT), Map 4 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_14]
-        Select Operator [SEL_13] (rows=291 width=178)
-          Output:["_col0","_col1","_col2","_col3"]
-          Join Operator [JOIN_12] (rows=291 width=178)
-            Output:["_col0","_col1","_col3","_col4"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":0,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-          <-Map 1 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_9]
-              PartitionCols:_col0
-              Select Operator [SEL_2] (rows=18 width=84)
-                Output:["_col0","_col1"]
-                Filter Operator [FIL_15] (rows=18 width=84)
-                  predicate:key is not null
-                  TableScan [TS_0] (rows=20 width=84)
-                    default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-          <-Map 3 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_10]
-              PartitionCols:_col0
-              Select Operator [SEL_5] (rows=18 width=80)
-                Output:["_col0"]
-                Filter Operator [FIL_16] (rows=18 width=80)
-                  predicate:key is not null
-                  TableScan [TS_3] (rows=20 width=80)
-                    default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-          <-Map 4 [PARTITION-LEVEL SORT]
-            PARTITION-LEVEL SORT [RS_11]
-              PartitionCols:_col0
-              Select Operator [SEL_8] (rows=18 width=84)
-                Output:["_col0","_col1"]
-                Filter Operator [FIL_17] (rows=18 width=84)
-                  predicate:key is not null
-                  TableScan [TS_6] (rows=20 width=84)
-                    default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT), Map 4 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_16]
-        Select Operator [SEL_15] (rows=72 width=101)
-          Output:["_col0","_col1","_col2","_col3","_col4"]
-          Filter Operator [FIL_13] (rows=72 width=101)
-            predicate:(((_col1 > 0) or (_col6 >= 0)) and ((_col1 + _col4) = 2))
-            Join Operator [JOIN_12] (rows=145 width=101)
-              Output:["_col1","_col2","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":0,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-            <-Map 1 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_9]
-                PartitionCols:_col0
-                Select Operator [SEL_2] (rows=9 width=93)
-                  Output:["_col0","_col1","_col2"]
-                  Filter Operator [FIL_17] (rows=9 width=93)
-                    predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                    TableScan [TS_0] (rows=20 width=88)
-                      default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-            <-Map 3 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_10]
-                PartitionCols:_col0
-                Select Operator [SEL_5] (rows=9 width=89)
-                  Output:["_col0","_col1"]
-                  Filter Operator [FIL_18] (rows=9 width=93)
-                    predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                    TableScan [TS_3] (rows=20 width=88)
-                      default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-            <-Map 4 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_11]
-                PartitionCols:_col0
-                Select Operator [SEL_8] (rows=18 width=84)
-                  Output:["_col0","_col1"]
-                  Filter Operator [FIL_19] (rows=18 width=84)
-                    predicate:key is not null
-                    TableScan [TS_6] (rows=20 width=84)
-                      default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0)
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT), Map 4 (PARTITION-LEVEL SORT)
-
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Reducer 2
-      File Output Operator [FS_16]
-        Select Operator [SEL_15] (rows=72 width=101)
-          Output:["_col0","_col1","_col2","_col3","_col4"]
-          Filter Operator [FIL_13] (rows=72 width=101)
-            predicate:(((_col1 > 0) or (_col6 >= 0)) and ((_col1 + _col4) = 2))
-            Join Operator [JOIN_12] (rows=145 width=101)
-              Output:["_col1","_col2","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":0,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-            <-Map 1 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_9]
-                PartitionCols:_col0
-                Select Operator [SEL_2] (rows=9 width=93)
-                  Output:["_col0","_col1","_col2"]
-                  Filter Operator [FIL_17] (rows=9 width=93)
-                    predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                    TableScan [TS_0] (rows=20 width=88)
-                      default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-            <-Map 3 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_10]
-                PartitionCols:_col0
-                Select Operator [SEL_5] (rows=9 width=89)
-                  Output:["_col0","_col1"]
-                  Filter Operator [FIL_18] (rows=9 width=93)
-                    predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0)) and key is not null)
-                    TableScan [TS_3] (rows=20 width=88)
-                      default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-            <-Map 4 [PARTITION-LEVEL SORT]
-              PARTITION-LEVEL SORT [RS_11]
-                PartitionCols:_col0
-                Select Operator [SEL_8] (rows=18 width=84)
-                  Output:["_col0","_col1"]
-                  Filter Operator [FIL_19] (rows=18 width=84)
-                    predicate:key is not null
-                    TableScan [TS_6] (rows=20 width=84)
-                      default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (SORT)
-
-Stage-0
-  Fetch Operator
-    limit:1
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_10]
-        Limit [LIM_9] (rows=1 width=97)
-          Number of rows:1
-          Select Operator [SEL_8] (rows=10 width=97)
-            Output:["_col0","_col1","_col2"]
-          <-Reducer 2 [SORT]
-            SORT [RS_7]
-              Select Operator [SEL_5] (rows=10 width=97)
-                Output:["_col0","_col1","_col2"]
-                Group By Operator [GBY_4] (rows=10 width=101)
-                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                <-Map 1 [GROUP]
-                  GROUP [RS_3]
-                    PartitionCols:_col0, _col1, _col2
-                    Group By Operator [GBY_2] (rows=10 width=101)
-                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                      Select Operator [SEL_1] (rows=20 width=88)
-                        Output:["key","c_int","c_float"]
-                        TableScan [TS_0] (rows=20 width=88)
-                          default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (GROUP)
-Reducer 4 <- Reducer 3 (SORT)
-
-Stage-0
-  Fetch Operator
-    limit:1
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_15]
-        Limit [LIM_14] (rows=1 width=20)
-          Number of rows:1
-          Select Operator [SEL_13] (rows=5 width=20)
-            Output:["_col0","_col1","_col2"]
-          <-Reducer 3 [SORT]
-            SORT [RS_12]
-              Group By Operator [GBY_10] (rows=5 width=20)
-                Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-              <-Reducer 2 [GROUP]
-                GROUP [RS_9]
-                  PartitionCols:_col0, _col1
-                  Group By Operator [GBY_8] (rows=5 width=20)
-                    Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col1, _col0
-                    Select Operator [SEL_5] (rows=10 width=101)
-                      Output:["_col0","_col1"]
-                      Group By Operator [GBY_4] (rows=10 width=101)
-                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                      <-Map 1 [GROUP]
-                        GROUP [RS_3]
-                          PartitionCols:_col0, _col1, _col2
-                          Group By Operator [GBY_2] (rows=10 width=101)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                            Select Operator [SEL_1] (rows=20 width=88)
-                              Output:["key","c_int","c_float"]
-                              TableScan [TS_0] (rows=20 width=88)
-                                default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (GROUP)
-
-Stage-0
-  Fetch Operator
-    limit:5
-    Stage-1
-      Reducer 3
-      File Output Operator [FS_13]
-        Limit [LIM_12] (rows=5 width=85)
-          Number of rows:5
-          Limit [LIM_10] (rows=5 width=85)
-            Number of rows:5
-            Select Operator [SEL_9] (rows=5 width=85)
-              Output:["_col0"]
-            <-Reducer 2 [GROUP]
-              GROUP [RS_8]
-                Limit [LIM_7] (rows=5 width=85)
-                  Number of rows:5
-                  Limit [LIM_5] (rows=5 width=85)
-                    Number of rows:5
-                    Select Operator [SEL_4] (rows=5 width=85)
-                      Output:["_col0"]
-                    <-Map 1 [GROUP]
-                      GROUP [RS_3]
-                        Limit [LIM_2] (rows=5 width=85)
-                          Number of rows:5
-                          Select Operator [SEL_1] (rows=20 width=80)
-                            Output:["_col0"]
-                            TableScan [TS_0] (rows=20 width=80)
-                              default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
-
-PREHOOK: query: explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (SORT)
-Reducer 3 <- Reducer 2 (SORT)
-Reducer 4 <- Reducer 3 (SORT)
-
-Stage-0
-  Fetch Operator
-    limit:5
-    Stage-1
-      Reducer 4
-      File Output Operator [FS_13]
-        Limit [LIM_12] (rows=5 width=89)
-          Number of rows:5
-          Select Operator [SEL_11] (rows=5 width=89)
-            Output:["_col0","_col1"]
-          <-Reducer 3 [SORT]
-            SORT [RS_10]
-              Limit [LIM_8] (rows=5 width=89)
-                Number of rows:5
-                Select Operator [SEL_7] (rows=5 width=89)
-                  Output:["_col0","_col1"]
-                <-Reducer 2 [SORT]
-                  SORT [RS_6]
-                    Limit [LIM_4] (rows=5 width=89)
-                      Number of rows:5
-                      Select Operator [SEL_3] (rows=20 width=84)
-                        Output:["_col0","_col1"]
-                      <-Map 1 [SORT]
-                        SORT [RS_2]
-                          Select Operator [SEL_1] (rows=20 width=84)
-                            Output:["_col0","_col1"]
-                            TableScan [TS_0] (rows=20 width=84)
-                              default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-
-PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
-POSTHOOK: type: QUERY
-Plan optimized by CBO.
-
-Vertex dependency in root stage
-Reducer 2 <- Map 1 (GROUP)
-Reducer 3 <- Reducer 2 (SORT)
-Reducer 4 <- Map 10 (PARTITION-LEVEL SORT), Reducer 3 (PARTITION-LEVEL SORT), Reducer 9 (PARTITION-LEVEL SORT)
-Reducer 5 <- Reducer 4 (GROUP)
-Reducer 6 <- Reducer 5 (SORT)
-Reducer 8 <- Map 7 (GROUP)
-Reducer 9 <- Reducer 8 (SORT)
-
-Stage-0
-  Fetch Operator
-    limit:5
-    Stage-1
-      Reducer 6
-      File Output Operator [FS_45]
-        Limit [LIM_44] (rows=1 width=20)
-          Number of rows:5
-          Select Operator [SEL_43] (rows=1 width=20)
-            Output:["_col0","_col1","_col2"]
-          <-Reducer 5 [SORT]
-            SORT [RS_42]
-              Select Operator [SEL_41] (rows=1 width=20)
-                Output:["_col0","_col1","_col2"]
-                Group By Operator [GBY_40] (rows=1 width=20)
-                  Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1
-                <-Reducer 4 [GROUP]
-                  GROUP [RS_39]
-                    PartitionCols:_col0, _col1
-                    Group By Operator [GBY_38] (rows=1 width=20)
-                      Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col4, _col6
-                      Select Operator [SEL_37] (rows=3 width=20)
-                        Output:["_col4","_col6"]
-                        Filter Operator [FIL_35] (rows=3 width=20)
-                          predicate:(((_col3 > 0) or (_col6 >= 0)) and ((_col3 + _col1) >= 0))
-                          Join Operator [JOIN_34] (rows=7 width=20)
-                            Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"}
-                          <-Map 10 [PARTITION-LEVEL SORT]
-                            PARTITION-LEVEL SORT [RS_33]
-                              PartitionCols:_col0
-                              Select Operator [SEL_30] (rows=18 width=84)
-                                Output:["_col0","_col1"]
-                                Filter Operator [FIL_48] (rows=18 width=84)
-                                  predicate:key is not null
-                                  TableScan [TS_28] (rows=20 width=84)
-                                    default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
-                          <-Reducer 3 [PARTITION-LEVEL SORT]
-                            PARTITION-LEVEL SORT [RS_31]
-                              PartitionCols:_col0
-                              Filter Operator [FIL_12] (rows=2 width=105)
-                                predicate:_col0 is not null
-                                Limit [LIM_10] (rows=3 width=105)
-                                  Number of rows:5
-                                  Select Operator [SEL_9] (rows=3 width=105)
-                                    Output:["_col0","_col1"]
-                                  <-Reducer 2 [SORT]
-                                    SORT [RS_8]
-                                      Select Operator [SEL_6] (rows=3 width=105)
-                                        Output:["_col0","_col1","_col2","_col3"]
-                                        Group By Operator [GBY_5] (rows=3 width=101)
-                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                        <-Map 1 [GROUP]
-                                          GROUP [RS_4]
-                                            PartitionCols:_col0, _col1, _col2
-                                            Group By Operator [GBY_3] (rows=3 width=101)
-                                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                              Filter Operator [FIL_46] (rows=6 width=93)
-                                                predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)))
-                                                TableScan [TS_0] (rows=20 width=88)
-                                                  default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-                          <-Reducer 9 [PARTITION-LEVEL SORT]
-                            PARTITION-LEVEL SORT [RS_32]
-                              PartitionCols:_col0
-                              Filter Operator [FIL_26] (rows=2 width=97)
-                                predicate:_col0 is not null
-                                Limit [LIM_24] (rows=3 width=97)
-                                  Number of rows:5
-                                  Select Operator [SEL_23] (rows=3 width=97)
-                                    Output:["_col0","_col1","_col2"]
-                                  <-Reducer 8 [SORT]
-                                    SORT [RS_22]
-                                      Select Operator [SEL_20] (rows=3 width=97)
-                                        Output:["_col0","_col1","_col2"]
-                                        Group By Operator [GBY_19] (rows=3 width=101)
-                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
-                                        <-Map 7 [GROUP]
-                                          GROUP [RS_18]
-                                            PartitionCols:_col0, _col1, _col2
-                                            Group By Operator [GBY_17] (rows=3 width=101)
-                                              Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
-                                              Filter Operator [FIL_47] (rows=6 width=93)
-                                                predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)))
-                                                TableScan [TS_14] (rows=20 width=88)
-                                                  default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
-
-PREHOOK: query: explain select cbo_t1.c_int           from cbo_t1 left semi join   cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)
-PREHOOK: type: QUERY
-POSTHOOK: query: expl

<TRUNCATED>

[2/2] hive git commit: Revert "HIVE-11133: Support hive.explain.user for Spark (Sahil via Xuefu)"

Posted by pr...@apache.org.
Revert "HIVE-11133: Support hive.explain.user for Spark (Sahil via Xuefu)"

This reverts commit 79e3c5a8d10e60ae1a981e74b0c48011d3fb2cdc.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b7e9105
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b7e9105
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b7e9105

Branch: refs/heads/master
Commit: 0b7e9105dadfe925ee945165421826bab2f1c645
Parents: 5ab03cb
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Mon May 1 19:17:15 2017 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Mon May 1 19:17:15 2017 -0700

----------------------------------------------------------------------
 .../hive/common/jsonexplain/DagJsonParser.java  |  167 -
 .../common/jsonexplain/JsonParserFactory.java   |    4 -
 .../jsonexplain/spark/SparkJsonParser.java      |   35 -
 .../hive/common/jsonexplain/tez/Connection.java |    2 +-
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |   54 +-
 .../hive/common/jsonexplain/tez/Printer.java    |    2 +-
 .../hive/common/jsonexplain/tez/Stage.java      |   20 +-
 .../common/jsonexplain/tez/TezJsonParser.java   |  153 +-
 .../jsonexplain/tez/TezJsonParserUtils.java     |    6 +-
 .../hive/common/jsonexplain/tez/Vertex.java     |   87 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    5 +-
 .../test/resources/testconfiguration.properties |    1 -
 .../hadoop/hive/ql/optimizer/Optimizer.java     |    2 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |   16 +-
 .../apache/hadoop/hive/ql/plan/SparkWork.java   |   10 +-
 .../clientpositive/spark_explainuser_1.q        |  671 --
 .../spark/spark_explainuser_1.q.out             | 5921 ------------------
 17 files changed, 232 insertions(+), 6924 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
deleted file mode 100644
index 1f01685..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/DagJsonParser.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain;
-
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class DagJsonParser implements JsonParser {
-  public final Map<String, Stage> stages = new LinkedHashMap<>();
-  protected final Logger LOG;
-  // the objects that have been printed.
-  public final Set<Object> printSet = new LinkedHashSet<>();
-  // the vertex that should be inlined. <Operator, list of Vertex that is
-  // inlined>
-  public final Map<Op, List<Connection>> inlineMap = new LinkedHashMap<>();
-
-  public DagJsonParser() {
-    super();
-    LOG = LoggerFactory.getLogger(this.getClass().getName());
-  }
-
-  public void extractStagesAndPlans(JSONObject inputObject) throws Exception {
-    // extract stages
-    JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
-    if (dependency != null && dependency.length() > 0) {
-      // iterate for the first time to get all the names of stages.
-      for (String stageName : JSONObject.getNames(dependency)) {
-        this.stages.put(stageName, new Stage(stageName, this));
-      }
-      // iterate for the second time to get all the dependency.
-      for (String stageName : JSONObject.getNames(dependency)) {
-        JSONObject dependentStageNames = dependency.getJSONObject(stageName);
-        this.stages.get(stageName).addDependency(dependentStageNames, this.stages);
-      }
-    }
-    // extract stage plans
-    JSONObject stagePlans = inputObject.getJSONObject("STAGE PLANS");
-    if (stagePlans != null && stagePlans.length() > 0) {
-      for (String stageName : JSONObject.getNames(stagePlans)) {
-        JSONObject stagePlan = stagePlans.getJSONObject(stageName);
-        this.stages.get(stageName).extractVertex(stagePlan);
-      }
-    }
-  }
-
-  /**
-   * @param indentFlag
-   *          help to generate correct indent
-   * @return
-   */
-  public static String prefixString(int indentFlag) {
-    StringBuilder sb = new StringBuilder();
-    for (int index = 0; index < indentFlag; index++) {
-      sb.append("  ");
-    }
-    return sb.toString();
-  }
-
-  /**
-   * @param indentFlag
-   * @param tail
-   *          help to generate correct indent with a specific tail
-   * @return
-   */
-  public static String prefixString(int indentFlag, String tail) {
-    StringBuilder sb = new StringBuilder();
-    for (int index = 0; index < indentFlag; index++) {
-      sb.append("  ");
-    }
-    int len = sb.length();
-    return sb.replace(len - tail.length(), len, tail).toString();
-  }
-
-  @Override
-  public void print(JSONObject inputObject, PrintStream outputStream) throws Exception {
-    LOG.info("JsonParser is parsing:" + inputObject.toString());
-    this.extractStagesAndPlans(inputObject);
-    Printer printer = new Printer();
-    // print out the cbo info
-    if (inputObject.has("cboInfo")) {
-      printer.println(inputObject.getString("cboInfo"));
-      printer.println();
-    }
-    // print out the vertex dependency in root stage
-    for (Stage candidate : this.stages.values()) {
-      if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) {
-        printer.println("Vertex dependency in root stage");
-        for (Entry<Vertex, List<Connection>> entry : candidate.tezStageDependency.entrySet()) {
-          StringBuilder sb = new StringBuilder();
-          sb.append(entry.getKey().name);
-          sb.append(" <- ");
-          boolean printcomma = false;
-          for (Connection connection : entry.getValue()) {
-            if (printcomma) {
-              sb.append(", ");
-            } else {
-              printcomma = true;
-            }
-            sb.append(connection.from.name + " (" + connection.type + ")");
-          }
-          printer.println(sb.toString());
-        }
-        printer.println();
-      }
-    }
-    // print out all the stages that have no childStages.
-    for (Stage candidate : this.stages.values()) {
-      if (candidate.childStages.isEmpty()) {
-        candidate.print(printer, 0);
-      }
-    }
-    outputStream.println(printer.toString());
-  }
-
-  public void addInline(Op op, Connection connection) {
-    List<Connection> list = inlineMap.get(op);
-    if (list == null) {
-      list = new ArrayList<>();
-      list.add(connection);
-      inlineMap.put(op, list);
-    } else {
-      list.add(connection);
-    }
-  }
-
-  public boolean isInline(Vertex v) {
-    for (List<Connection> list : inlineMap.values()) {
-      for (Connection connection : list) {
-        if (connection.from.equals(v)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public abstract String mapEdgeType(String edgeName);
-
-  public abstract String getFrameworkName();
-}
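
For reference, the two prefixString helpers in the class deleted above drive all of the tree indentation in the user-level explain output: one emits two spaces per indent level, the other overwrites the tail of that padding with a branch marker such as "<-". A minimal standalone sketch (not part of the Hive tree; the method bodies are copied from the deleted class) shows what they produce:

    public class PrefixDemo {
      // two spaces per indent level, as in the deleted DagJsonParser
      static String prefixString(int indentFlag) {
        StringBuilder sb = new StringBuilder();
        for (int index = 0; index < indentFlag; index++) {
          sb.append("  ");
        }
        return sb.toString();
      }

      // same padding, but the last tail.length() characters become the tail
      static String prefixString(int indentFlag, String tail) {
        StringBuilder sb = new StringBuilder();
        for (int index = 0; index < indentFlag; index++) {
          sb.append("  ");
        }
        int len = sb.length();
        return sb.replace(len - tail.length(), len, tail).toString();
      }

      public static void main(String[] args) {
        System.out.println("[" + prefixString(3) + "]");        // [      ]
        System.out.println("[" + prefixString(3, "<-") + "]");  // [    <-]
      }
    }

This is the indentation visible in the reverted .q.out above, where branches are introduced with lines like "<-Map 1 [GROUP]".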

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
index 2a5d47a..db118bf 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/JsonParserFactory.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.common.jsonexplain;
 
-import org.apache.hadoop.hive.common.jsonexplain.spark.SparkJsonParser;
 import org.apache.hadoop.hive.common.jsonexplain.tez.TezJsonParser;
 import org.apache.hadoop.hive.conf.HiveConf;
 
@@ -36,9 +35,6 @@ public class JsonParserFactory {
     if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
       return new TezJsonParser();
     }
-    if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
-      return new SparkJsonParser();
-    }
     return null;
   }
 }
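
With the Spark branch removed, the factory hands back a parser only when the execution engine is "tez"; for "spark" (and anything else) it now returns null, so user-level explain falls back to the ordinary rendering. A simplified mock (an illustration only, not Hive's actual classes) captures the dispatch after the revert:

    public class FactoryFallbackDemo {
      // stand-in for JsonParserFactory.getParser after this change
      static String getParser(String engine) {
        if ("tez".equals(engine)) {
          return "TezJsonParser";  // the real factory returns new TezJsonParser()
        }
        return null;               // "spark" no longer maps to SparkJsonParser
      }

      public static void main(String[] args) {
        for (String engine : new String[] { "tez", "spark", "mr" }) {
          System.out.println(engine + " -> " + getParser(engine));
        }
      }
    }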

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
deleted file mode 100644
index 9485aa4..0000000
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/spark/SparkJsonParser.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.common.jsonexplain.spark;
-
-import org.apache.hadoop.hive.common.jsonexplain.DagJsonParser;
-
-
-public class SparkJsonParser extends DagJsonParser {
-
-  @Override
-  public String mapEdgeType(String edgeName) {
-    return edgeName;
-  }
-
-  @Override
-  public String getFrameworkName() {
-    return "Spark";
-  }
-}
\ No newline at end of file
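
The whole Spark parser amounted to two overrides: edge names passed through unchanged (the reverted .q.out above shows them verbatim, e.g. GROUP, SORT, PARTITION-LEVEL SORT) and the stage-plan key was "Spark". A small sketch contrasts that identity mapping with the Tez renaming restored further down (the tezEdge switch is copied from the TezJsonParser hunk below):

    public class EdgeTypeDemo {
      // the deleted SparkJsonParser left edge names untouched
      static String sparkEdge(String edgeName) {
        return edgeName;
      }

      // the Tez parser renames Tez edge types to shuffle-style labels
      static String tezEdge(String edgeName) {
        switch (edgeName) {
          case "BROADCAST_EDGE":     return "BROADCAST";
          case "SIMPLE_EDGE":        return "SHUFFLE";
          case "CUSTOM_SIMPLE_EDGE": return "PARTITION_ONLY_SHUFFLE";
          case "CUSTOM_EDGE":        return "MULTICAST";
          default:                   return "UNKNOWN";
        }
      }

      public static void main(String[] args) {
        System.out.println(sparkEdge("PARTITION-LEVEL SORT")); // unchanged
        System.out.println(tezEdge("SIMPLE_EDGE"));            // SHUFFLE
      }
    }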

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
index 0df6f4c..5cd0e4c 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 public final class Connection implements Comparable<Connection>{
   public final String type;

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
index 03c5981..96e75c0 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -27,7 +27,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.hadoop.hive.common.jsonexplain.Vertex.VertexType;
+import org.apache.hadoop.hive.common.jsonexplain.tez.Vertex.VertexType;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
@@ -35,7 +35,7 @@ import org.json.JSONObject;
 public final class Op {
   public final String name;
   // tezJsonParser
-  public final DagJsonParser parser;
+  public final TezJsonParser parser;
   public final String operatorId;
   public Op parent;
   public final List<Op> children;
@@ -54,7 +54,7 @@ public final class Op {
   };
 
   public Op(String name, String id, String outputVertexName, List<Op> children,
-      Map<String, String> attrs, JSONObject opObject, Vertex vertex, DagJsonParser tezJsonParser)
+      Map<String, String> attrs, JSONObject opObject, Vertex vertex, TezJsonParser tezJsonParser)
       throws JSONException {
     super();
     this.name = name;
@@ -89,27 +89,25 @@ public final class Op {
     if (this.type == OpType.MAPJOIN) {
       JSONObject joinObj = opObject.getJSONObject(this.name);
       // get the map for posToVertex
+      JSONObject verticeObj = joinObj.getJSONObject("input vertices:");
       Map<String, Vertex> posToVertex = new LinkedHashMap<>();
-      if (joinObj.has("input vertices:")) {
-        JSONObject verticeObj = joinObj.getJSONObject("input vertices:");
-        for (String pos : JSONObject.getNames(verticeObj)) {
-          String vertexName = verticeObj.getString(pos);
-          // update the connection
-          Connection c = null;
-          for (Connection connection : vertex.parentConnections) {
-            if (connection.from.name.equals(vertexName)) {
-              posToVertex.put(pos, connection.from);
-              c = connection;
-              break;
-            }
-          }
-          if (c != null) {
-            parser.addInline(this, c);
+      for (String pos : JSONObject.getNames(verticeObj)) {
+        String vertexName = verticeObj.getString(pos);
+        // update the connection
+        Connection c = null;
+        for (Connection connection : vertex.parentConnections) {
+          if (connection.from.name.equals(vertexName)) {
+            posToVertex.put(pos, connection.from);
+            c = connection;
+            break;
           }
         }
-        // update the attrs
-        this.attrs.remove("input vertices:");
+        if (c != null) {
+          parser.addInline(this, c);
+        }
       }
+      // update the attrs
+      this.attrs.remove("input vertices:");
       // update the keys to use operator name
       JSONObject keys = joinObj.getJSONObject("keys:");
       // find out the vertex for the big table
@@ -279,11 +277,11 @@ public final class Op {
 
   private String getNameWithOpIdStats() {
     StringBuffer sb = new StringBuffer();
-    sb.append(DagJsonParserUtils.renameReduceOutputOperator(name, vertex));
+    sb.append(TezJsonParserUtils.renameReduceOutputOperator(name, vertex));
     if (operatorId != null) {
       sb.append(" [" + operatorId + "]");
     }
-    if (!DagJsonParserUtils.OperatorNoStats.contains(name) && attrs.containsKey("Statistics:")) {
+    if (!TezJsonParserUtils.OperatorNoStats.contains(name) && attrs.containsKey("Statistics:")) {
       sb.append(" (" + attrs.get("Statistics:") + ")");
     }
     attrs.remove("Statistics:");
@@ -301,15 +299,15 @@ public final class Op {
   public void print(Printer printer, int indentFlag, boolean branchOfJoinOp) throws Exception {
     // print name
     if (parser.printSet.contains(this)) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + " Please refer to the previous "
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
           + this.getNameWithOpIdStats());
       return;
     }
     parser.printSet.add(this);
     if (!branchOfJoinOp) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + this.getNameWithOpIdStats());
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.getNameWithOpIdStats());
     } else {
-      printer.println(DagJsonParser.prefixString(indentFlag, "<-") + this.getNameWithOpIdStats());
+      printer.println(TezJsonParser.prefixString(indentFlag, "<-") + this.getNameWithOpIdStats());
     }
     branchOfJoinOp = false;
     // if this operator is a Map Join Operator or a Merge Join Operator
@@ -332,8 +330,8 @@ public final class Op {
     // print attr
     indentFlag++;
     if (!attrs.isEmpty()) {
-      printer.println(DagJsonParser.prefixString(indentFlag)
-          + DagJsonParserUtils.attrsToString(attrs));
+      printer.println(TezJsonParser.prefixString(indentFlag)
+          + TezJsonParserUtils.attrsToString(attrs));
     }
     // print inline vertex
     if (parser.inlineMap.containsKey(this)) {
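
One behavioral edge in the hunk above: the restored code reads "input vertices:" unconditionally again, while the Spark-era version guarded the lookup with joinObj.has(...), presumably because some Spark map-join plans omit that key. A self-contained sketch (illustration only, using the same org.json API) of why the guard matters:

    import org.json.JSONObject;

    public class GuardDemo {
      public static void main(String[] args) throws Exception {
        JSONObject joinObj = new JSONObject(); // no "input vertices:" key
        if (joinObj.has("input vertices:")) {
          // safe: the key is only dereferenced when present
          System.out.println(joinObj.getJSONObject("input vertices:"));
        } else {
          System.out.println("no input vertices; skip inline-vertex bookkeeping");
        }
        // without the has() check, getJSONObject("input vertices:") would
        // throw a JSONException on this object
      }
    }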

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
index 6f040f6..d3c91d6 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 public final class Printer {
   public static final String lineSeparator = System.getProperty("line.separator");;

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
index d21a565..63937f8 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -26,7 +26,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.jsonexplain.Vertex.VertexType;
+import org.apache.hadoop.hive.common.jsonexplain.tez.Vertex.VertexType;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
@@ -37,7 +37,7 @@ public final class Stage {
   //internal name is used to track the stages
   public final String internalName;
   //tezJsonParser
-  public final DagJsonParser parser;
+  public final TezJsonParser parser;
   // upstream stages, e.g., root stage
   public final List<Stage> parentStages = new ArrayList<>();
   // downstream stages.
@@ -49,7 +49,7 @@ public final class Stage {
   // fetch operator.
   Op op;
 
-  public Stage(String name, DagJsonParser tezJsonParser) {
+  public Stage(String name, TezJsonParser tezJsonParser) {
     super();
     internalName = name;
     externalName = name;
@@ -85,9 +85,9 @@ public final class Stage {
    *           and/or attributes.
    */
   public void extractVertex(JSONObject object) throws Exception {
-    if (object.has(this.parser.getFrameworkName())) {
+    if (object.has("Tez")) {
       this.tezStageDependency = new TreeMap<>();
-      JSONObject tez = (JSONObject) object.get(this.parser.getFrameworkName());
+      JSONObject tez = (JSONObject) object.get("Tez");
       JSONObject vertices = tez.getJSONObject("Vertices:");
       if (tez.has("Edges:")) {
         JSONObject edges = tez.getJSONObject("Edges:");
@@ -233,12 +233,12 @@ public final class Stage {
   public void print(Printer printer, int indentFlag) throws Exception {
     // print stagename
     if (parser.printSet.contains(this)) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + " Please refer to the previous "
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
           + externalName);
       return;
     }
     parser.printSet.add(this);
-    printer.println(DagJsonParser.prefixString(indentFlag) + externalName);
+    printer.println(TezJsonParser.prefixString(indentFlag) + externalName);
     // print vertexes
     indentFlag++;
     for (Vertex candidate : this.vertexs.values()) {
@@ -247,8 +247,8 @@ public final class Stage {
       }
     }
     if (!attrs.isEmpty()) {
-      printer.println(DagJsonParser.prefixString(indentFlag)
-          + DagJsonParserUtils.attrsToString(attrs));
+      printer.println(TezJsonParser.prefixString(indentFlag)
+          + TezJsonParserUtils.attrsToString(attrs));
     }
     if (op != null) {
       op.print(printer, indentFlag, false);
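
The extractVertex change above is the crux of this file: instead of asking the parser for its framework name, the stage plan is again probed with the literal key "Tez". A sketch of the JSON shape the restored code expects (assumed from the lookups in the hunk; built with the same org.json API):

    import org.json.JSONObject;

    public class StagePlanShapeDemo {
      public static void main(String[] args) throws Exception {
        // minimal stage-plan skeleton matching object.has("Tez"),
        // tez.getJSONObject("Vertices:") and the optional "Edges:" read
        JSONObject tez = new JSONObject()
            .put("Vertices:", new JSONObject().put("Map 1", new JSONObject()))
            .put("Edges:", new JSONObject());
        JSONObject stagePlan = new JSONObject().put("Tez", tez);

        System.out.println(stagePlan.has("Tez"));   // true: parsed as a Tez DAG
        System.out.println(stagePlan.has("Spark")); // false: no longer handled
      }
    }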

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
index 294dc6b..ea86048 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
@@ -18,29 +18,146 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-import org.apache.hadoop.hive.common.jsonexplain.DagJsonParser;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
+import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
+import org.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-public class TezJsonParser extends DagJsonParser {
+public final class TezJsonParser implements JsonParser {
+  public final Map<String, Stage> stages = new LinkedHashMap<>();
+  protected final Logger LOG;
+  // the objects that have been printed.
+  public final Set<Object> printSet = new LinkedHashSet<>();
+  // the vertex that should be inlined. <Operator, list of Vertex that is
+  // inlined>
+  public final Map<Op, List<Connection>> inlineMap = new LinkedHashMap<>();
 
-  @Override
-  public String mapEdgeType(String edgeName) {
-    switch (edgeName) {
-      case "BROADCAST_EDGE":
-        return "BROADCAST";
-      case "SIMPLE_EDGE":
-        return "SHUFFLE";
-      case "CUSTOM_SIMPLE_EDGE":
-        return "PARTITION_ONLY_SHUFFLE";
-      case "CUSTOM_EDGE":
-        return "MULTICAST";
-      default:
-        return "UNKNOWN";
+  public TezJsonParser() {
+    super();
+    LOG = LoggerFactory.getLogger(this.getClass().getName());
+  }
+
+  public void extractStagesAndPlans(JSONObject inputObject) throws Exception {
+    // extract stages
+    JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
+    if (dependency != null && dependency.length() > 0) {
+      // iterate for the first time to get all the names of stages.
+      for (String stageName : JSONObject.getNames(dependency)) {
+        this.stages.put(stageName, new Stage(stageName, this));
+      }
+      // iterate for the second time to get all the dependencies.
+      for (String stageName : JSONObject.getNames(dependency)) {
+        JSONObject dependentStageNames = dependency.getJSONObject(stageName);
+        this.stages.get(stageName).addDependency(dependentStageNames, this.stages);
+      }
+    }
+    // extract stage plans
+    JSONObject stagePlans = inputObject.getJSONObject("STAGE PLANS");
+    if (stagePlans != null && stagePlans.length() > 0) {
+      for (String stageName : JSONObject.getNames(stagePlans)) {
+        JSONObject stagePlan = stagePlans.getJSONObject(stageName);
+        this.stages.get(stageName).extractVertex(stagePlan);
+      }
+    }
+  }
+
+  /**
+   * @param indentFlag
+   *          the number of two-space indent levels to generate
+   * @return the indent prefix string
+   */
+  public static String prefixString(int indentFlag) {
+    StringBuilder sb = new StringBuilder();
+    for (int index = 0; index < indentFlag; index++) {
+      sb.append("  ");
+    }
+    return sb.toString();
+  }
+
+  /**
+   * @param indentFlag
+   *          the number of two-space indent levels to generate
+   * @param tail
+   *          the string that overwrites the last characters of the indent
+   * @return the indent prefix string ending with the given tail
+   */
+  public static String prefixString(int indentFlag, String tail) {
+    StringBuilder sb = new StringBuilder();
+    for (int index = 0; index < indentFlag; index++) {
+      sb.append("  ");
     }
+    int len = sb.length();
+    return sb.replace(len - tail.length(), len, tail).toString();
   }
 
   @Override
-  public String getFrameworkName() {
-    return "Tez";
+  public void print(JSONObject inputObject, PrintStream outputStream) throws Exception {
+    LOG.info("JsonParser is parsing: " + inputObject.toString());
+    this.extractStagesAndPlans(inputObject);
+    Printer printer = new Printer();
+    // print out the cbo info
+    if (inputObject.has("cboInfo")) {
+      printer.println(inputObject.getString("cboInfo"));
+      printer.println();
+    }
+    // print out the vertex dependency in root stage
+    for (Stage candidate : this.stages.values()) {
+      if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) {
+        printer.println("Vertex dependency in root stage");
+        for (Entry<Vertex, List<Connection>> entry : candidate.tezStageDependency.entrySet()) {
+          StringBuilder sb = new StringBuilder();
+          sb.append(entry.getKey().name);
+          sb.append(" <- ");
+          boolean printcomma = false;
+          for (Connection connection : entry.getValue()) {
+            if (printcomma) {
+              sb.append(", ");
+            } else {
+              printcomma = true;
+            }
+            sb.append(connection.from.name + " (" + connection.type + ")");
+          }
+          printer.println(sb.toString());
+        }
+        printer.println();
+      }
+    }
+    // print out all the stages that have no childStages.
+    for (Stage candidate : this.stages.values()) {
+      if (candidate.childStages.isEmpty()) {
+        candidate.print(printer, 0);
+      }
+    }
+    outputStream.println(printer.toString());
+  }
+
+  public void addInline(Op op, Connection connection) {
+    List<Connection> list = inlineMap.get(op);
+    if (list == null) {
+      list = new ArrayList<>();
+      inlineMap.put(op, list);
+    }
+    list.add(connection);
+  }
+
+  public boolean isInline(Vertex v) {
+    for (List<Connection> list : inlineMap.values()) {
+      for (Connection connection : list) {
+        if (connection.from.equals(v)) {
+          return true;
+        }
+      }
+    }
+    return false;
   }
-}
\ No newline at end of file
+}
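
The two prefixString overloads restored above drive all indentation in the
user-level output; the second overwrites the tail end of the indent so edge
arrows line up under their parent. A minimal sketch of the arithmetic
(standalone class, illustrative only):

// Illustrative only: reproduces the indent arithmetic of the methods above.
public final class PrefixStringDemo {
  static String prefix(int indentFlag) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < indentFlag; i++) {
      sb.append("  ");
    }
    return sb.toString();
  }

  static String prefix(int indentFlag, String tail) {
    StringBuilder sb = new StringBuilder(prefix(indentFlag));
    int len = sb.length();
    return sb.replace(len - tail.length(), len, tail).toString();
  }

  public static void main(String[] args) {
    System.out.println("[" + prefix(3) + "]");        // [      ]
    System.out.println("[" + prefix(3, "<-") + "]");  // [    <-]
  }
}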

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
index a518ac1..363a422 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParserUtils.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.util.Arrays;
 import java.util.List;
@@ -24,14 +24,14 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 
-public class DagJsonParserUtils {
+public class TezJsonParserUtils {
 
   public static List<String> OperatorNoStats = Arrays.asList(new String[] { "File Output Operator",
       "Reduce Output Operator" });
 
   public static String renameReduceOutputOperator(String operatorName, Vertex vertex) {
     if (operatorName.equals("Reduce Output Operator") && vertex.edgeType != null) {
-      return vertex.edgeType;
+      return vertex.edgeType.name();
     } else {
       return operatorName;
     }
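
renameReduceOutputOperator is what makes a Reduce Output Operator show up in
the rendered tree under its edge type. A self-contained sketch of the rule,
with a local enum standing in for Vertex.EdgeType (names illustrative):

// Illustrative only: the renaming rule restored above.
public final class RenameDemo {
  enum EdgeType { BROADCAST, SHUFFLE, MULTICAST, PARTITION_ONLY_SHUFFLE, UNKNOWN }

  static String rename(String operatorName, EdgeType edgeType) {
    if (operatorName.equals("Reduce Output Operator") && edgeType != null) {
      return edgeType.name();
    }
    return operatorName;
  }

  public static void main(String[] args) {
    System.out.println(rename("Reduce Output Operator", EdgeType.SHUFFLE)); // SHUFFLE
    System.out.println(rename("Select Operator", EdgeType.SHUFFLE));        // Select Operator
  }
}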

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
index c93059d..13ecac0 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hive.common.jsonexplain;
+package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -25,7 +25,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.hadoop.hive.common.jsonexplain.Op.OpType;
+import org.apache.hadoop.hive.common.jsonexplain.tez.Op.OpType;
 import org.codehaus.jackson.JsonParseException;
 import org.codehaus.jackson.map.JsonMappingException;
 import org.json.JSONArray;
@@ -35,7 +35,7 @@ import org.json.JSONObject;
 public final class Vertex implements Comparable<Vertex>{
   public final String name;
   //tezJsonParser
-  public final DagJsonParser parser;
+  public final TezJsonParser parser;
   // vertex's parent connections.
   public final List<Connection> parentConnections = new ArrayList<>();
   // vertex's children vertex.
@@ -67,9 +67,9 @@ public final class Vertex implements Comparable<Vertex>{
   public static enum EdgeType {
     BROADCAST, SHUFFLE, MULTICAST, PARTITION_ONLY_SHUFFLE, UNKNOWN
   };
-  public String edgeType;
+  public EdgeType edgeType;
 
-  public Vertex(String name, JSONObject vertexObject, DagJsonParser dagJsonParser) {
+  public Vertex(String name, JSONObject vertexObject, TezJsonParser tezJsonParser) {
     super();
     this.name = name;
     if (this.name != null) {
@@ -87,7 +87,7 @@ public final class Vertex implements Comparable<Vertex>{
     }
     this.dummy = false;
     this.vertexObject = vertexObject;
-    parser = dagJsonParser;
+    parser = tezJsonParser;
   }
 
   public void addDependency(Connection connection) throws JSONException {
@@ -138,8 +138,6 @@ public final class Vertex implements Comparable<Vertex>{
           }
         } else if (key.equals("tag:")) {
           this.tag = vertexObject.getString(key);
-        } else if (key.equals("Local Work:")) {
-          extractOp(vertexObject.getJSONObject(key));
         } else {
           throw new Exception("Unsupported operator tree in vertex " + this.name);
         }
@@ -171,34 +169,32 @@ public final class Vertex implements Comparable<Vertex>{
       List<Op> children = new ArrayList<>();
       String id = null;
       String outputVertexName = null;
-      if (JSONObject.getNames(attrObj) != null) {
-        for (String attrName : JSONObject.getNames(attrObj)) {
-          if (attrName.equals("children")) {
-            Object childrenObj = attrObj.get(attrName);
-            if (childrenObj instanceof JSONObject) {
-              if (((JSONObject) childrenObj).length() != 0) {
-                children.add(extractOp((JSONObject) childrenObj));
-              }
-            } else if (childrenObj instanceof JSONArray) {
-              if (((JSONArray) childrenObj).length() != 0) {
-                JSONArray array = ((JSONArray) childrenObj);
-                for (int index = 0; index < array.length(); index++) {
-                  children.add(extractOp(array.getJSONObject(index)));
-                }
+      for (String attrName : JSONObject.getNames(attrObj)) {
+        if (attrName.equals("children")) {
+          Object childrenObj = attrObj.get(attrName);
+          if (childrenObj instanceof JSONObject) {
+            if (((JSONObject) childrenObj).length() != 0) {
+              children.add(extractOp((JSONObject) childrenObj));
+            }
+          } else if (childrenObj instanceof JSONArray) {
+            if (((JSONArray) childrenObj).length() != 0) {
+              JSONArray array = ((JSONArray) childrenObj);
+              for (int index = 0; index < array.length(); index++) {
+                children.add(extractOp(array.getJSONObject(index)));
               }
-            } else {
-              throw new Exception("Unsupported operator " + this.name
-                      + "'s children operator is neither a jsonobject nor a jsonarray");
             }
           } else {
-            if (attrName.equals("OperatorId:")) {
-              id = attrObj.get(attrName).toString();
-            } else if (attrName.equals("outputname:")) {
-              outputVertexName = attrObj.get(attrName).toString();
-            } else {
-              if (!attrObj.get(attrName).toString().isEmpty()) {
-                attrs.put(attrName, attrObj.get(attrName).toString());
-              }
+            throw new Exception("Unsupported operator " + this.name
+                + "'s children operator is neither a jsonobject nor a jsonarray");
+          }
+        } else {
+          if (attrName.equals("OperatorId:")) {
+            id = attrObj.get(attrName).toString();
+          } else if (attrName.equals("outputname:")) {
+            outputVertexName = attrObj.get(attrName).toString();
+          } else {
+            if (!attrObj.get(attrName).toString().isEmpty()) {
+              attrs.put(attrName, attrObj.get(attrName).toString());
             }
           }
         }
@@ -220,20 +216,20 @@ public final class Vertex implements Comparable<Vertex>{
     // print vertexname
     if (parser.printSet.contains(this) && numReduceOp <= 1) {
       if (type != null) {
-        printer.println(DagJsonParser.prefixString(indentFlag, "<-")
+        printer.println(TezJsonParser.prefixString(indentFlag, "<-")
             + " Please refer to the previous " + this.name + " [" + type + "]");
       } else {
-        printer.println(DagJsonParser.prefixString(indentFlag, "<-")
+        printer.println(TezJsonParser.prefixString(indentFlag, "<-")
             + " Please refer to the previous " + this.name);
       }
       return;
     }
     parser.printSet.add(this);
     if (type != null) {
-      printer.println(DagJsonParser.prefixString(indentFlag, "<-") + this.name + " [" + type + "]"
+      printer.println(TezJsonParser.prefixString(indentFlag, "<-") + this.name + " [" + type + "]"
           + this.executionMode);
     } else if (this.name != null) {
-      printer.println(DagJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.name + this.executionMode);
     }
     // print operators
     if (numReduceOp > 1 && !(callingVertex.vertexType == VertexType.UNION)) {
@@ -286,7 +282,22 @@ public final class Vertex implements Comparable<Vertex>{
   }
 
   public void setType(String type) {
-    this.edgeType = this.parser.mapEdgeType(type);
+    switch (type) {
+    case "BROADCAST_EDGE":
+      this.edgeType = EdgeType.BROADCAST;
+      break;
+    case "SIMPLE_EDGE":
+      this.edgeType = EdgeType.SHUFFLE;
+      break;
+    case "CUSTOM_SIMPLE_EDGE":
+      this.edgeType = EdgeType.PARTITION_ONLY_SHUFFLE;
+      break;
+    case "CUSTOM_EDGE":
+      this.edgeType = EdgeType.MULTICAST;
+      break;
+    default:
+      this.edgeType = EdgeType.UNKNOWN;
+    }
   }
 
   // The following code should be gone after HIVE-11075 using topological order
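
With the revert, the edge-name mapping moves back from the parser into
Vertex.setType as the switch above. A standalone sketch of the same mapping
(class name illustrative):

// Illustrative only: the Tez edge-name to EdgeType mapping shown above.
public final class EdgeTypeDemo {
  enum EdgeType { BROADCAST, SHUFFLE, MULTICAST, PARTITION_ONLY_SHUFFLE, UNKNOWN }

  static EdgeType map(String tezEdgeName) {
    switch (tezEdgeName) {
    case "BROADCAST_EDGE":     return EdgeType.BROADCAST;
    case "SIMPLE_EDGE":        return EdgeType.SHUFFLE;
    case "CUSTOM_SIMPLE_EDGE": return EdgeType.PARTITION_ONLY_SHUFFLE;
    case "CUSTOM_EDGE":        return EdgeType.MULTICAST;
    default:                   return EdgeType.UNKNOWN;
    }
  }

  public static void main(String[] args) {
    System.out.println(map("SIMPLE_EDGE"));  // SHUFFLE
    System.out.println(map("NO_SUCH_EDGE")); // UNKNOWN
  }
}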

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ea8485d..3400560 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2218,10 +2218,7 @@ public class HiveConf extends Configuration {
         "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."),
     HIVE_EXPLAIN_USER("hive.explain.user", true,
         "Whether to show explain result at user level.\n" +
-        "When enabled, will log EXPLAIN output for the query at user level. Tez only."),
-    HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
-        "Whether to show explain result at user level.\n" +
-        "When enabled, will log EXPLAIN output for the query at user level. Spark only."),
+        "When enabled, will log EXPLAIN output for the query at user level."),
 
     // prefix used to auto generated column aliases (this should be started with '_')
     HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
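
After this hunk only hive.explain.user remains; the Spark-specific flag is
gone. A hedged sketch of reading and toggling it through HiveConf (requires
hive-common on the classpath; the wrapping class is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;

// Illustrative only: exercises the single remaining flag. The getBoolVar /
// setBoolVar calls are the standard HiveConf accessors.
public final class ExplainUserFlagDemo {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // defaults to true per the definition above
    System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER));
    conf.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false);
    System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER));
  }
}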

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 753f3a9..a53fc1a 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1370,7 +1370,6 @@ spark.query.files=add_part_multiple.q, \
 spark.only.query.files=spark_combine_equivalent_work.q,\
   spark_dynamic_partition_pruning.q,\
   spark_dynamic_partition_pruning_2.q,\
-  spark_explainuser_1.q,\
   spark_vectorized_dynamic_partition_pruning.q,\
   spark_use_file_size_for_mapjoin.q,\
   spark_use_op_stats.q

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 92225ac..a3a19f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -210,7 +210,7 @@ public class Optimizer {
     if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCORRELATION) &&
         !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW) &&
         !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME) &&
-        !isTezExecEngine && !isSparkExecEngine) {
+        !isTezExecEngine) {
       transformations.add(new CorrelationOptimizer());
     }
     if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE) > 0) {
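
The one-line change above means CorrelationOptimizer is once again skipped
only on tez; the spark exclusion added by HIVE-11133 is gone. A minimal
sketch of the restored guard, with local booleans standing in for the
HiveConf lookups (all names illustrative):

// Illustrative only: the shape of the restored condition in Optimizer.
public final class CorrelationGuardDemo {
  public static void main(String[] args) {
    boolean correlation = true;           // hive.optimize.correlation
    boolean groupBySkew = false;          // hive.groupby.skewindata
    boolean skewJoinCompileTime = false;  // hive.optimize.skewjoin.compiletime
    boolean isTezExecEngine = false;      // hive.execution.engine == "tez"
    if (correlation && !groupBySkew && !skewJoinCompileTime && !isTezExecEngine) {
      System.out.println("CorrelationOptimizer added"); // now also reached on spark
    }
  }
}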

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 7a0d4a7..668783a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -191,20 +191,8 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
         && !config.isDependency()
         && !config.isLogical()
         && !config.isAuthorize()
-        && (
-             (
-               HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_EXPLAIN_USER)
-               &&
-               HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")
-             )
-             ||
-             (
-               HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_SPARK_EXPLAIN_USER)
-               &&
-               HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")
-             )
-           )
-        );
+        && (HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_EXPLAIN_USER) && HiveConf
+            .getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")));
 
     ExplainWork work = new ExplainWork(ctx.getResFile(),
         pCtx,
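
The simplification above restores the original gate: user-level explain is
produced only when hive.explain.user is on and the engine is tez. The same
condition in isolation (the helper class is illustrative; the HiveConf calls
are the ones in the hunk):

import org.apache.hadoop.hive.conf.HiveConf;

// Illustrative only: the restored user-level-explain gate, extracted into a
// helper for readability.
public final class UserExplainGate {
  static boolean userLevelExplain(HiveConf conf) {
    return HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_EXPLAIN_USER)
        && HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
  }

  public static void main(String[] args) {
    System.out.println(userLevelExplain(new HiveConf()));
  }
}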

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 9d46cac..066e32d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -34,7 +34,6 @@ import java.util.Set;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 import com.google.common.base.Preconditions;
 
@@ -44,8 +43,7 @@ import com.google.common.base.Preconditions;
  * roots and ReduceWork at all other nodes.
  */
 @SuppressWarnings("serial")
-@Explain(displayName = "Spark", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+@Explain(displayName = "Spark", vectorization = Vectorization.SUMMARY_PATH)
 public class SparkWork extends AbstractOperatorDesc {
   private static int counter;
   private final String name;
@@ -78,8 +76,7 @@ public class SparkWork extends AbstractOperatorDesc {
   /**
    * @return a map of "vertex name" to BaseWork
    */
-  @Explain(displayName = "Vertices", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Vertices", vectorization = Vectorization.SUMMARY_PATH)
   public Map<String, BaseWork> getWorkMap() {
     Map<String, BaseWork> result = new LinkedHashMap<String, BaseWork>();
     for (BaseWork w: getAllWork()) {
@@ -381,8 +378,7 @@ public class SparkWork extends AbstractOperatorDesc {
     }
    }
 
-  @Explain(displayName = "Edges", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED },
-      vectorization = Vectorization.SUMMARY_PATH)
+  @Explain(displayName = "Edges")
   public Map<ComparableName, List<Dependency>> getDependencyMap() {
     Map<String, String> allDependencies = new HashMap<String, String>();
     Map<ComparableName, List<Dependency>> result =
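
Dropping Level.USER from these annotations is what removes SparkWork from
user-level explain output: with no explicit explainLevels, the @Explain
defaults apply, and (at this point in the codebase, assuming Explain.java is
unchanged) those defaults are DEFAULT and EXTENDED only. A sketch of the two
annotation forms side by side (DemoWork is hypothetical):

import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;

// Illustrative only: DemoWork is not a Hive class.
public class DemoWork {
  @Explain(displayName = "Edges", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public String getEdges() {     // rendered in user, default and extended explain
    return "Reducer 2 <- Map 1 (SHUFFLE)";
  }

  @Explain(displayName = "Vertices")
  public String getVertices() {  // rendered only at the annotation's default levels
    return "Map 1, Reducer 2";
  }
}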

http://git-wip-us.apache.org/repos/asf/hive/blob/0b7e9105/ql/src/test/queries/clientpositive/spark_explainuser_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/spark_explainuser_1.q b/ql/src/test/queries/clientpositive/spark_explainuser_1.q
deleted file mode 100644
index 43252f0..0000000
--- a/ql/src/test/queries/clientpositive/spark_explainuser_1.q
+++ /dev/null
@@ -1,671 +0,0 @@
-set hive.strict.checks.bucketing=false;
-
-set hive.mapred.mode=nonstrict;
-set hive.spark.explain.user=true;
-
-explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc;
-create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc;
-
-alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-
-explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
-insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
-explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100;
-
-explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-
-alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
-
-
-explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-
-drop table src_orc_merge_test_part;
-
-set hive.auto.convert.join=true;
-
-explain select sum(hash(a.k1,a.v1,a.k2, a.v2))
-from (
-select src1.key as k1, src1.value as v1,
-       src2.key as k2, src2.value as v2 FROM
-  (select * FROM src WHERE src.key < 10) src1
-    JOIN
-  (select * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-) a;
-
-set hive.cbo.enable=true;
-set hive.exec.check.crossproducts=false;
-
-set hive.stats.fetch.column.stats=true;
-set hive.auto.convert.join=false;
-
-explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key;
-explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int desc;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c;
-
-explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc;
-
-explain select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-	UNION  ALL
-	select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key;
-
-explain select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
-    UNION  ALL
-        select 'min' as key,  min(c_int) as value from cbo_t3 s2
-    UNION ALL
-        select 'avg' as key,  avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key;
-
-explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1;
-explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join  cbo_t2 on cbo_t1.key=cbo_t2.key;
-explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join  cbo_t2 on cbo_t1.key=cbo_t2.key;
-
-explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key;
-explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a;
-
-explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0);
-
-explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where  (q + 1 = 2) and (R.b > 0 or c_int >= 0);
-
-explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1;
-explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1;
-explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5;
-explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5;
-
-explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5;
-
-explain select cbo_t1.c_int           from cbo_t1 left semi join   cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0);
-explain select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1  where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2  where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where  (b + 1 = 2) and (R.b > 0 or c >= 0);
-explain select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1  >= 0) and (b > 0 or a >= 0) group by a, c  having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a;
-
-explain select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1;
-
-explain select null from cbo_t1;
-
-explain select key from cbo_t1 where c_int = -6  or c_int = +6;
-
-explain select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt  = cbo_t2.dt  where cbo_t1.dt = '2014' ;
-
-explain select *
-from src_cbo b
-where not exists
-  (select distinct a.key
-  from src_cbo a
-  where b.value = a.value and a.value > 'val_2'
-  )
-;
-
-explain select *
-from src_cbo b
-group by key, value
-having not exists
-  (select a.key
-  from src_cbo a
-  where b.value = a.value  and a.key = b.key and a.value > 'val_12'
-  )
-;
-
-create view cv1 as
-select *
-from src_cbo b
-where exists
-  (select a.key
-  from src_cbo a
-  where b.value = a.value  and a.key = b.key and a.value > 'val_9')
-;
-
-explain select * from cv1;
-
-explain select *
-from (select *
-      from src_cbo b
-      where exists
-          (select a.key
-          from src_cbo a
-          where b.value = a.value  and a.key = b.key and a.value > 'val_9')
-     ) a
-;
-
-
-explain select *
-from src_cbo
-where src_cbo.key in (select key from src_cbo s1 where s1.key > '9')
-;
-
-
-explain select p.p_partkey, li.l_suppkey
-from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
-where li.l_linenumber = 1 and
- li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
-;
-
-explain select key, value, count(*)
-from src_cbo b
-where b.key in (select key from src_cbo where src_cbo.key > '8')
-group by key, value
-having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key )
-;
-
-explain select p_mfgr, p_name, avg(p_size)
-from part
-group by p_mfgr, p_name
-having p_name in
-  (select first_value(p_name) over(partition by p_mfgr order by p_size) from part)
-;
-
-explain select *
-from src_cbo
-where src_cbo.key not in
-  ( select key  from src_cbo s1
-    where s1.key > '2'
-  ) order by key
-;
-
-explain select p_mfgr, b.p_name, p_size
-from part b
-where b.p_name not in
-  (select p_name
-  from (select p_mfgr, p_name, p_size as r from part) a
-  where r < 10 and b.p_mfgr = a.p_mfgr
-  )
-;
-
-explain select p_name, p_size
-from
-part where part.p_size not in
-  (select avg(p_size)
-  from (select p_size from part) a
-  where p_size < 10
-  ) order by p_name
-;
-
-explain select b.p_mfgr, min(p_retailprice)
-from part b
-group by b.p_mfgr
-having b.p_mfgr not in
-  (select p_mfgr
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
-  where min(p_retailprice) = l and r - l > 600
-  )
-  order by b.p_mfgr
-;
-
-explain select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1;
-explain select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1;
-explain select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1;
-explain select *, rank() over(partition by key order by value) as rr from src1;
-
-
-set hive.auto.convert.join=false;
-set hive.optimize.correlation=false;
-explain
-select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (select x.key AS key, count(1) AS cnt
-      FROM src1 x JOIN src y ON (x.key = y.key)
-      GROUP BY x.key) tmp;
-
-set hive.optimize.correlation=true;
-explain
-select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (select x.key AS key, count(1) AS cnt
-      FROM src1 x JOIN src y ON (x.key = y.key)
-      GROUP BY x.key) tmp;
-
-set hive.auto.convert.join=true;
-set hive.optimize.correlation=true;
-explain
-select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (select x.key AS key, count(1) AS cnt
-      FROM src1 x JOIN src y ON (x.key = y.key)
-      GROUP BY x.key) tmp;
-
-set hive.auto.convert.join=false;
-set hive.optimize.correlation=false;
-explain
-select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
-FROM (select x.key AS key, count(1) AS cnt
-      FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
-      GROUP BY x.key) tmp;
-
-explain create table abcd (a int, b int, c int, d int);
-create table abcd (a int, b int, c int, d int);
-LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd;
-
-set hive.map.aggr=true;
-explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-
-set hive.map.aggr=false;
-explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-
-explain create table src_rc_merge_test(key int, value string) stored as rcfile;
-create table src_rc_merge_test(key int, value string) stored as rcfile;
-
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-
-set hive.exec.compress.output = true;
-
-explain create table tgt_rc_merge_test(key int, value string) stored as rcfile;
-create table tgt_rc_merge_test(key int, value string) stored as rcfile;
-insert into table tgt_rc_merge_test select * from src_rc_merge_test;
-
-show table extended like `tgt_rc_merge_test`;
-
-explain select count(1) from tgt_rc_merge_test;
-explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
-
-alter table tgt_rc_merge_test concatenate;
-
-show table extended like `tgt_rc_merge_test`;
-
-explain select count(1) from tgt_rc_merge_test;
-explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
-
-drop table src_rc_merge_test;
-drop table tgt_rc_merge_test;
-
-explain select src.key from src cross join src src2;
-
-
-explain create table nzhang_Tmp(a int, b string);
-create table nzhang_Tmp(a int, b string);
-
-explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-
-
-explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-
-create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-
-explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-
-create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-explain create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-
-set hive.map.aggr=false;
-set hive.groupby.skewindata=true;
-
-
-explain
-select src1.key as k1, src1.value as v1,
-       src2.key as k2, src2.value as v2 FROM
-  (select * FROM src WHERE src.key < 10) src1
-    JOIN
-  (select * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2;
-
-
-CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1;
-
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value;
-
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
-
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
-
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
-
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
-
-explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value;
-explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value;
-explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value;
-
-explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value;
-
-CREATE TABLE smb_input(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
-LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
-
-
-;
-
-CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-
-from smb_input
-insert overwrite table smb_input1 select *
-insert overwrite table smb_input2 select *;
-
-SET hive.optimize.bucketmapjoin = true;
-SET hive.optimize.bucketmapjoin.sortedmerge = true;
-SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-
-analyze table smb_input1 compute statistics;
-
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value;
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key;
-
-drop table sales;
-drop table things;
-
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
-CREATE TABLE sales (name STRING, id INT)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-
-CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-
-load data local inpath '../../data/files/sales.txt' INTO TABLE sales;
-load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23');
-load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24');
-
-explain select name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id);
-
-drop table sales;
-drop table things;
-
-set hive.auto.convert.join=true;
-set hive.auto.convert.join.noconditionaltask=true;
-set hive.auto.convert.join.noconditionaltask.size=10000;
-set hive.stats.fetch.column.stats=false;
-
-set hive.mapjoin.optimized.hashtable=false;
-
-explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450';
-
-set hive.mapjoin.optimized.hashtable=true;
-
-explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450';
-set hive.stats.fetch.column.stats=true;
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
-  partition by p_mfgr
-  order by p_name
-  );
-
-explain
-select p_mfgr, p_name,
-p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
-distribute by j.p_mfgr
-sort by j.p_name)
-;
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
-  partition by p_mfgr
-  order by p_name
-  ) abc;
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
-          partition by p_mfgr
-          order by p_name
-          )
-;
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop(on part
-          partition by p_mfgr
-          order by p_name
-          )
-group by p_mfgr, p_name, p_size
-;
-
-explain
-select abc.*
-from noop(on part
-partition by p_mfgr
-order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey;
-
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
-partition by p_mfgr
-order by p_name, p_size desc);
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part
-  partition by p_mfgr
-  order by p_name);
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
-partition by p_mfgr
-order by p_name)
-;
-
-explain
-select p_mfgr, p_name, p_size,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
-from noop(on noopwithmap(on noop(on part
-partition by p_mfgr
-order by p_mfgr DESC, p_name
-)));
-
-explain
-select p_mfgr, p_name,
-sub1.cd, sub1.s1
-from (select p_mfgr, p_name,
-count(p_size) over (partition by p_mfgr order by p_name) as cd,
-p_retailprice,
-sum(p_retailprice) over w1  as s1
-from noop(on part
-partition by p_mfgr
-order by p_name)
-window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following)
-) sub1 ;
-
-
-explain
-select abc.p_mfgr, abc.p_name,
-rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r,
-dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr,
-count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
-abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
-abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
-from noop(on part
-partition by p_mfgr
-order by p_name
-) abc join part p1 on abc.p_partkey = p1.p_partkey
-;
-
-
-explain create view IF NOT EXISTS mfgr_price_view as
-select p_mfgr, p_brand,
-sum(p_retailprice) as s
-from part
-group by p_mfgr, p_brand;
-
-CREATE TABLE part_4(
-p_mfgr STRING,
-p_name STRING,
-p_size INT,
-r INT,
-dr INT,
-s DOUBLE);
-
-CREATE TABLE part_5(
-p_mfgr STRING,
-p_name STRING,
-p_size INT,
-s2 INT,
-r INT,
-dr INT,
-cud DOUBLE,
-fv1 INT);
-
-explain
-from noop(on part
-partition by p_mfgr
-order by p_name)
-INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size,
-rank() over (distribute by p_mfgr sort by p_name) as r,
-dense_rank() over (distribute by p_mfgr sort by p_name) as dr,
-sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row)  as s
-INSERT OVERWRITE TABLE part_5 select  p_mfgr,p_name, p_size,
-round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2,
-rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r,
-dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr,
-cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud,
-first_value(p_size, true) over w1  as fv1
-window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following);
-
-
-explain
-select p_mfgr, p_name,
-rank() over (partition by p_mfgr,p_name) as r,
-dense_rank() over (partition by p_mfgr,p_name) as dr,
-p_size, sum(p_size) over (partition by p_mfgr,p_name rows between unbounded preceding and current row)  as s1
-from noop(on
-        noopwithmap(on
-          noop(on
-              noop(on part
-              partition by p_mfgr
-              order by p_mfgr)
-            )
-          partition by p_mfgr,p_name
-          order by p_mfgr,p_name)
-        partition by p_mfgr,p_name
-        order by p_mfgr,p_name) ;
-
-explain
-select p_mfgr, p_name,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
-from noop(on
-        noop(on
-          noop(on
-              noop(on part
-              partition by p_mfgr
-              order by p_mfgr)
-            )
-          partition by p_mfgr,p_name
-          order by p_mfgr,p_name)
-        partition by p_mfgr
-        order by p_mfgr ) ;
-
-explain
-select p_mfgr, p_name,
-rank() over (partition by p_mfgr order by p_name) as r,
-dense_rank() over (partition by p_mfgr order by p_name) as dr,
-p_size, sum(p_size) over (partition by p_mfgr order by p_name) as s1
-from noop(on
-        noop(on
-          noop(on
-              noop(on part
-              partition by p_mfgr,p_name
-              order by p_mfgr,p_name)
-            )
-          partition by p_mfgr
-          order by p_mfgr));
-
-explain select distinct src.* from src;
-
-explain select explode(array('a', 'b'));
-
-set hive.optimize.skewjoin = true;
-set hive.skewjoin.key = 2;
-
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
-
-
-explain
-FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value;
-
-FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value;
-
-
-
-explain
-select /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key;
-
-explain
-select /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key;
-
-explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-
-explain
-select * FROM
-(select src.* FROM src) x
-JOIN
-(select src.* FROM src) Y
-ON (x.key = Y.key);
-
-
-explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val;
-
-explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key;
-
-explain select count(1) from  T1 a join T1 b on a.key = b.key;
-
-explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-
-explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-
-explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-
-explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key;