Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/27 20:57:03 UTC

svn commit: r1634671 [27/46] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/parquet_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/parquet_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/parquet_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/parquet_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,367 @@
+PREHOOK: query: drop table if exists staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists parquet_jointable1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists parquet_jointable1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists parquet_jointable2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists parquet_jointable2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists parquet_jointable1_bucketed_sorted
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists parquet_jointable1_bucketed_sorted
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table staging (key int, value string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: create table staging (key int, value string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: insert into table staging select distinct key, value from src order by key limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@staging
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert into table staging select distinct key, value from src order by key limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@staging
+POSTHOOK: Lineage: staging.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: staging.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@staging
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_jointable1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_jointable1
+PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@staging
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_jointable2
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_jointable2
+PREHOOK: query: -- MR join
+
+explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- MR join
+
+explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: p2
+                  Statistics: Num rows: 4 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 217 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 217 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: myvalue (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: p1
+                  Statistics: Num rows: 74 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 37 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 37 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 
+                  1 {VALUE._col1}
+                outputColumnNames: _col7
+                Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col7 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_jointable1
+PREHOOK: Input: default@parquet_jointable2
+#### A masked pattern was here ####
+POSTHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_jointable1
+POSTHOOK: Input: default@parquet_jointable2
+#### A masked pattern was here ####
+val_0value
+val_10value
+PREHOOK: query: -- The two tables involved in the join have differing numbers of columns (table1: 2, table2: 3). In the case of map and SMB joins,
+-- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table,
+-- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to the second/current table
+
+-- Map join
+
+explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The two tables involved in the join have differing numbers of columns (table1: 2, table2: 3). In the case of map and SMB joins,
+-- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table,
+-- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to the second/current table
+
+-- Map join
+
+explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: p2
+                  Statistics: Num rows: 4 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 217 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 217 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: myvalue (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: p1
+                  Statistics: Num rows: 74 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 37 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 37 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 
+                  1 {VALUE._col1}
+                outputColumnNames: _col7
+                Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col7 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 40 Data size: 162 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_jointable1
+PREHOOK: Input: default@parquet_jointable2
+#### A masked pattern was here ####
+POSTHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_jointable1
+POSTHOOK: Input: default@parquet_jointable2
+#### A masked pattern was here ####
+val_0value
+val_10value
+PREHOOK: query: -- SMB join
+
+create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_jointable1_bucketed_sorted
+POSTHOOK: query: -- SMB join
+
+create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted
+PREHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@parquet_jointable1_bucketed_sorted
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted
+POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.value EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_jointable2_bucketed_sorted
+POSTHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted
+PREHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@parquet_jointable2_bucketed_sorted
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value1 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value2 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: p2
+                  Statistics: Num rows: 5 Data size: 526 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value2 (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: p1
+                  Statistics: Num rows: 3 Data size: 334 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0}
+                  1 {VALUE._col1}
+                outputColumnNames: _col1, _col7
+                Statistics: Num rows: 3 Data size: 346 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3 Data size: 346 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 3 Data size: 346 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_jointable1_bucketed_sorted
+PREHOOK: Input: default@parquet_jointable2_bucketed_sorted
+#### A masked pattern was here ####
+POSTHOOK: query: select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_jointable1_bucketed_sorted
+POSTHOOK: Input: default@parquet_jointable2_bucketed_sorted
+#### A masked pattern was here ####
+val_0value1	val_0value2-2
+val_10value1	val_10value2-2
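
The output above exercises three join strategies against Parquet-backed tables on the Spark branch: a plain shuffle join (labelled "MR join"), a map join over tables with differing column counts, and an SMB join over bucketed, sorted tables. The embedded comment explains the point of the differing column counts: hive.io.file.readcolumn.ids is shared across all tables of a join, so the Parquet reader must keep only the indexes belonging to the table currently being read when it builds the schema for ParquetInputSplit. The parquet_join.q source itself is not in this chunk of the diff; the following is a minimal HiveQL reconstruction from the PREHOOK/POSTHOOK entries, and every "set" line is an assumption, since golden output does not echo session settings:

set hive.enforce.bucketing=true;                   -- assumed: inserts must honor CLUSTERED BY
set hive.enforce.sorting=true;                     -- assumed: inserts must honor SORTED BY
set hive.auto.convert.sortmerge.join=true;         -- assumed: allows the SMB conversion
set hive.optimize.bucketmapjoin=true;              -- assumed
set hive.optimize.bucketmapjoin.sortedmerge=true;  -- assumed

drop table if exists staging;
create table staging (key int, value string) stored as textfile;
insert into table staging select distinct key, value from src order by key limit 2;

create table parquet_jointable1 stored as parquet as select * from staging;
-- deliberately a different column count (3 vs 2) to exercise the readcolumn.ids handling
create table parquet_jointable2 stored as parquet
  as select key, key+1, concat(value, "value") as myvalue from staging;

select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key = p2.key;

The expected rows follow directly: staging holds the two smallest distinct keys of src (0 and 10), so concat(value, "value") yields val_0value and val_10value.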

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,250 @@
+PREHOOK: query: EXPLAIN
+SELECT src1.c1, count(1) 
+FROM
+(SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key AS c3, src.value AS c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
+GROUP BY src1.c1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT src1.c1, count(1) 
+FROM
+(SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key AS c3, src.value AS c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
+GROUP BY src1.c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Reducer 2 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((((key > '2') and (key < '400')) and key is not null) and (key <> '4')) and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                      Filter Operator
+                        predicate: ((_col0 < '400') and _col0 is not null) (type: boolean)
+                        Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((((((key > '1') and (key < '400')) and key is not null) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Filter Operator
+                        predicate: ((_col0 < '400') and _col0 is not null) (type: boolean)
+                        Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0}
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2'))) and ((_col2 > '50') or (_col0 < '50'))) and (_col2 <> '4')) (type: boolean)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT src1.c1, count(1) 
+FROM
+(SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key AS c3, src.value AS c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
+GROUP BY src1.c1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT src1.c1, count(1) 
+FROM
+(SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key AS c3, src.value AS c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
+GROUP BY src1.c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Reducer 2 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((((key > '2') and (key < '400')) and key is not null) and (key <> '4')) and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((((((key > '1') and (key < '400')) and key is not null) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0}
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 9 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2'))) and ((_col2 > '50') or (_col0 < '50'))) and (_col2 <> '4')) (type: boolean)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
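
ppd_gby_join.q.out records the same EXPLAIN twice, and the two plans differ only in whether a residual Filter Operator survives below each map-side Select Operator (the first plan re-checks "(_col0 < '400') and _col0 is not null" after projecting; the second goes straight to the Reduce Output Operator). The SET statements are not echoed into golden files, but this shape is consistent with toggling duplicate-filter removal between the two EXPLAINs. A minimal sketch, assuming that is what ppd_gby_join.q does:

set hive.optimize.ppd=true;                  -- assumed: predicate pushdown enabled throughout
set hive.ppd.remove.duplicatefilters=false;  -- assumed: first plan keeps the residual filters

EXPLAIN
SELECT src1.c1, count(1)
FROM (SELECT src.key AS c1, src.value AS c2 FROM src WHERE src.key > '1') src1
JOIN (SELECT src.key AS c3, src.value AS c4 FROM src WHERE src.key > '2') src2
ON src1.c1 = src2.c3 AND src1.c1 < '400'
WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2')
  AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
GROUP BY src1.c1;

set hive.ppd.remove.duplicatefilters=true;   -- assumed: second plan drops them

EXPLAIN
SELECT src1.c1, count(1)
FROM (SELECT src.key AS c1, src.value AS c2 FROM src WHERE src.key > '1') src1
JOIN (SELECT src.key AS c3, src.value AS c4 FROM src WHERE src.key > '2') src2
ON src1.c1 = src2.c3 AND src1.c1 < '400'
WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2')
  AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4')
GROUP BY src1.c1;

Either way, the pushed-down predicates are visible in both plans' TableScan branches, e.g. "(key > '2') and (key < '400') ... and (key > '20')" on the src2 side.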

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,1074 @@
+PREHOOK: query: EXPLAIN
+SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((((key > '2') and (key < '400')) and key is not null) and (key <> '4')) and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                      Filter Operator
+                        predicate: ((_col0 < '400') and _col0 is not null) (type: boolean)
+                        Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col1 (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((((((key > '1') and (key < '400')) and key is not null) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Filter Operator
+                        predicate: ((_col0 < '400') and _col0 is not null) (type: boolean)
+                        Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: string)
+                          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2'))) and ((_col2 > '50') or (_col0 < '50'))) and (_col2 <> '4')) (type: boolean)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col3 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+200	val_200
+200	val_200
+200	val_200
+200	val_200
+201	val_201
+202	val_202
+203	val_203
+203	val_203
+203	val_203
+203	val_203
+205	val_205
+205	val_205
+205	val_205
+205	val_205
+207	val_207
+207	val_207
+207	val_207
+207	val_207
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+209	val_209
+209	val_209
+209	val_209
+209	val_209
+213	val_213
+213	val_213
+213	val_213
+213	val_213
+214	val_214
+216	val_216
+216	val_216
+216	val_216
+216	val_216
+217	val_217
+217	val_217
+217	val_217
+217	val_217
+218	val_218
+219	val_219
+219	val_219
+219	val_219
+219	val_219
+221	val_221
+221	val_221
+221	val_221
+221	val_221
+222	val_222
+223	val_223
+223	val_223
+223	val_223
+223	val_223
+224	val_224
+224	val_224
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+229	val_229
+229	val_229
+229	val_229
+229	val_229
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+233	val_233
+233	val_233
+233	val_233
+233	val_233
+235	val_235
+237	val_237
+237	val_237
+237	val_237
+237	val_237
+238	val_238
+238	val_238
+238	val_238
+238	val_238
+239	val_239
+239	val_239
+239	val_239
+239	val_239
+24	val_24
+24	val_24
+24	val_24
+24	val_24
+241	val_241
+242	val_242
+242	val_242
+242	val_242
+242	val_242
+244	val_244
+247	val_247
+248	val_248
+249	val_249
+252	val_252
+255	val_255
+255	val_255
+255	val_255
+255	val_255
+256	val_256
+256	val_256
+256	val_256
+256	val_256
+257	val_257
+258	val_258
+26	val_26
+26	val_26
+26	val_26
+26	val_26
+260	val_260
+262	val_262
+263	val_263
+265	val_265
+265	val_265
+265	val_265
+265	val_265
+266	val_266
+27	val_27
+272	val_272
+272	val_272
+272	val_272
+272	val_272
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+274	val_274
+275	val_275
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+278	val_278
+278	val_278
+278	val_278
+278	val_278
+28	val_28
+280	val_280
+280	val_280
+280	val_280
+280	val_280
+281	val_281
+281	val_281
+281	val_281
+281	val_281
+282	val_282
+282	val_282
+282	val_282
+282	val_282
+283	val_283
+284	val_284
+285	val_285
+286	val_286
+287	val_287
+288	val_288
+288	val_288
+288	val_288
+288	val_288
+289	val_289
+291	val_291
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+30	val_30
+302	val_302
+305	val_305
+306	val_306
+307	val_307
+307	val_307
+307	val_307
+307	val_307
+308	val_308
+309	val_309
+309	val_309
+309	val_309
+309	val_309
+310	val_310
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+315	val_315
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+317	val_317
+317	val_317
+317	val_317
+317	val_317
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+321	val_321
+321	val_321
+321	val_321
+321	val_321
+322	val_322
+322	val_322
+322	val_322
+322	val_322
+323	val_323
+325	val_325
+325	val_325
+325	val_325
+325	val_325
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+33	val_33
+331	val_331
+331	val_331
+331	val_331
+331	val_331
+332	val_332
+333	val_333
+333	val_333
+333	val_333
+333	val_333
+335	val_335
+336	val_336
+338	val_338
+339	val_339
+34	val_34
+341	val_341
+342	val_342
+342	val_342
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+344	val_344
+344	val_344
+345	val_345
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+351	val_351
+353	val_353
+353	val_353
+353	val_353
+353	val_353
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+365	val_365
+366	val_366
+367	val_367
+367	val_367
+367	val_367
+367	val_367
+368	val_368
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+37	val_37
+37	val_37
+37	val_37
+37	val_37
+373	val_373
+374	val_374
+375	val_375
+377	val_377
+378	val_378
+379	val_379
+382	val_382
+382	val_382
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+389	val_389
+392	val_392
+393	val_393
+394	val_394
+395	val_395
+395	val_395
+395	val_395
+395	val_395
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+397	val_397
+397	val_397
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+399	val_399
+399	val_399
+PREHOOK: query: EXPLAIN
+SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((((key > '2') and (key < '400')) and key is not null) and (key <> '4')) and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((((((key > '1') and (key < '400')) and key is not null) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 9 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2'))) and ((_col2 > '50') or (_col0 < '50'))) and (_col2 <> '4')) (type: boolean)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col3 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.c1, src2.c4 
+FROM
+(SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1
+JOIN
+(SELECT src.key as c3, src.value as c4 from src where src.key > '2' ) src2
+ON src1.c1 = src2.c3 AND src1.c1 < '400'
+WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+200	val_200
+200	val_200
+200	val_200
+200	val_200
+201	val_201
+202	val_202
+203	val_203
+203	val_203
+203	val_203
+203	val_203
+205	val_205
+205	val_205
+205	val_205
+205	val_205
+207	val_207
+207	val_207
+207	val_207
+207	val_207
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+208	val_208
+209	val_209
+209	val_209
+209	val_209
+209	val_209
+213	val_213
+213	val_213
+213	val_213
+213	val_213
+214	val_214
+216	val_216
+216	val_216
+216	val_216
+216	val_216
+217	val_217
+217	val_217
+217	val_217
+217	val_217
+218	val_218
+219	val_219
+219	val_219
+219	val_219
+219	val_219
+221	val_221
+221	val_221
+221	val_221
+221	val_221
+222	val_222
+223	val_223
+223	val_223
+223	val_223
+223	val_223
+224	val_224
+224	val_224
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+229	val_229
+229	val_229
+229	val_229
+229	val_229
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+233	val_233
+233	val_233
+233	val_233
+233	val_233
+235	val_235
+237	val_237
+237	val_237
+237	val_237
+237	val_237
+238	val_238
+238	val_238
+238	val_238
+238	val_238
+239	val_239
+239	val_239
+239	val_239
+239	val_239
+24	val_24
+24	val_24
+24	val_24
+24	val_24
+241	val_241
+242	val_242
+242	val_242
+242	val_242
+242	val_242
+244	val_244
+247	val_247
+248	val_248
+249	val_249
+252	val_252
+255	val_255
+255	val_255
+255	val_255
+255	val_255
+256	val_256
+256	val_256
+256	val_256
+256	val_256
+257	val_257
+258	val_258
+26	val_26
+26	val_26
+26	val_26
+26	val_26
+260	val_260
+262	val_262
+263	val_263
+265	val_265
+265	val_265
+265	val_265
+265	val_265
+266	val_266
+27	val_27
+272	val_272
+272	val_272
+272	val_272
+272	val_272
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+273	val_273
+274	val_274
+275	val_275
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+278	val_278
+278	val_278
+278	val_278
+278	val_278
+28	val_28
+280	val_280
+280	val_280
+280	val_280
+280	val_280
+281	val_281
+281	val_281
+281	val_281
+281	val_281
+282	val_282
+282	val_282
+282	val_282
+282	val_282
+283	val_283
+284	val_284
+285	val_285
+286	val_286
+287	val_287
+288	val_288
+288	val_288
+288	val_288
+288	val_288
+289	val_289
+291	val_291
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+298	val_298
+30	val_30
+302	val_302
+305	val_305
+306	val_306
+307	val_307
+307	val_307
+307	val_307
+307	val_307
+308	val_308
+309	val_309
+309	val_309
+309	val_309
+309	val_309
+310	val_310
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+311	val_311
+315	val_315
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+316	val_316
+317	val_317
+317	val_317
+317	val_317
+317	val_317
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+318	val_318
+321	val_321
+321	val_321
+321	val_321
+321	val_321
+322	val_322
+322	val_322
+322	val_322
+322	val_322
+323	val_323
+325	val_325
+325	val_325
+325	val_325
+325	val_325
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+327	val_327
+33	val_33
+331	val_331
+331	val_331
+331	val_331
+331	val_331
+332	val_332
+333	val_333
+333	val_333
+333	val_333
+333	val_333
+335	val_335
+336	val_336
+338	val_338
+339	val_339
+34	val_34
+341	val_341
+342	val_342
+342	val_342
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+344	val_344
+344	val_344
+345	val_345
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+35	val_35
+351	val_351
+353	val_353
+353	val_353
+353	val_353
+353	val_353
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+365	val_365
+366	val_366
+367	val_367
+367	val_367
+367	val_367
+367	val_367
+368	val_368
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+369	val_369
+37	val_37
+37	val_37
+37	val_37
+37	val_37
+373	val_373
+374	val_374
+375	val_375
+377	val_377
+378	val_378
+379	val_379
+382	val_382
+382	val_382
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+389	val_389
+392	val_392
+393	val_393
+394	val_394
+395	val_395
+395	val_395
+395	val_395
+395	val_395
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+396	val_396
+397	val_397
+397	val_397
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+399	val_399
+399	val_399
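
ppd_join.q.out follows the same template as ppd_gby_join.q.out, minus the aggregation: two EXPLAINs over one join query, the first with residual post-Select filters and the second without, each followed by the query's result set. The row multiplicities fall out of src holding duplicate keys: a key that appears n times on each side of the self-join yields n*n output rows (key 230 occurs 5 times in src and 25 times above). Assuming the same duplicate-filter toggle as in ppd_gby_join.q, the driving script reduces to:

set hive.optimize.ppd=true;                  -- assumed
set hive.ppd.remove.duplicatefilters=false;  -- assumed

EXPLAIN
SELECT src1.c1, src2.c4
FROM (SELECT src.key AS c1, src.value AS c2 FROM src WHERE src.key > '1') src1
JOIN (SELECT src.key AS c3, src.value AS c4 FROM src WHERE src.key > '2') src2
ON src1.c1 = src2.c3 AND src1.c1 < '400'
WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2')
  AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4');

SELECT src1.c1, src2.c4
FROM (SELECT src.key AS c1, src.value AS c2 FROM src WHERE src.key > '1') src1
JOIN (SELECT src.key AS c3, src.value AS c4 FROM src WHERE src.key > '2') src2
ON src1.c1 = src2.c3 AND src1.c1 < '400'
WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2')
  AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4');

set hive.ppd.remove.duplicatefilters=true;   -- assumed
-- the same EXPLAIN and SELECT are then repeated to produce the second plan and result set

These golden files are exercised by the Spark CLI driver tests under itests (this commit also touches itests/src/test/resources/, presumably to register the new .q files with the Spark test driver); the exact wiring is outside this chunk of the diff.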