Posted to commits@hive.apache.org by kg...@apache.org on 2017/04/18 07:02:56 UTC

[09/11] hive git commit: HIVE-16146: If possible find a better way to filter the TestBeeLineDriver output (Peter Vary via Zoltan Haindrich, reviewed by Vihang Karajgaonkar)
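
What changed, in shape: the old .q.out recorded the raw BeeLine session (">>> " prompt echoes, HiveServer2 INFO/DEBUG/WARN log lines, quoted result rows, row-count footers), while the new .q.out keeps only the deterministic PREHOOK/POSTHOOK records and masks local file paths. The following is a minimal, hypothetical Java sketch of that kind of line filtering; the class, method, and regexes are invented here for illustration and are not the code committed under HIVE-16146 (which, as the diff also shows, additionally reformats result rows from quoted CSV to tab-separated, not covered by this sketch).

// Hypothetical illustration only; names and patterns are not from the Hive source.
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

public class BeeLineOutputFilterSketch {
  // Driver/log noise emitted by the BeeLine session, absent from golden files.
  private static final Pattern LOG_LINE =
      Pattern.compile("^(INFO|DEBUG|WARN|ERROR)\\s*: .*");
  // Row-count footers such as "No rows affected" or "37 rows selected".
  private static final Pattern ROW_COUNT =
      Pattern.compile("^(No rows (affected|selected)|\\d+ rows selected)\\s*$");
  // Nondeterministic local paths that must not appear verbatim in golden files.
  private static final Pattern LOCAL_PATH = Pattern.compile("file:/\\S+");

  public static List<String> filter(List<String> rawLines) {
    List<String> out = new ArrayList<>();
    for (String line : rawLines) {
      if (line.startsWith(">>>")) {
        continue;                                  // drop prompt echoes
      }
      if (LOG_LINE.matcher(line).matches() || ROW_COUNT.matcher(line).matches()) {
        continue;                                  // drop log noise and footers
      }
      if (LOCAL_PATH.matcher(line).find()) {
        out.add("#### A masked pattern was here ####"); // mask machine-local paths
        continue;
      }
      out.add(line);                               // keep deterministic output
    }
    return out;
  }
}

Run over the removed ("-") lines below, a filter of this shape yields roughly the added ("+") lines: the CREATE TABLE / LOAD log blocks collapse to their PREHOOK/POSTHOOK records and the "PREHOOK: Input: file:/!!ELIDED!!" lines become masked-pattern placeholders.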

http://git-wip-us.apache.org/repos/asf/hive/blob/2509e2fa/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
index 70a37ca..c943b03 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
@@ -1,948 +1,490 @@
->>>  set hive.strict.checks.bucketing=false;
-No rows affected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : PREHOOK: type: CREATETABLE
-INFO  : PREHOOK: Output: database:smb_mapjoin_1
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_3
-INFO  : Starting task [Stage-0:DDL] in serial mode
-INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-INFO  : POSTHOOK: type: CREATETABLE
-INFO  : POSTHOOK: Output: database:smb_mapjoin_1
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_3
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
-No rows affected 
->>>  
->>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
-INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
-INFO  : PREHOOK: type: LOAD
-INFO  : PREHOOK: Input: file:/!!ELIDED!!
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Starting task [Stage-0:MOVE] in serial mode
-INFO  : Loading data to table smb_mapjoin_1.smb_bucket_1 from file:/!!ELIDED!!
-INFO  : Starting task [Stage-1:STATS] in serial mode
-INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
-INFO  : POSTHOOK: type: LOAD
-INFO  : POSTHOOK: Input: file:/!!ELIDED!!
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_1
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
-No rows affected 
->>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
-INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
-INFO  : PREHOOK: type: LOAD
-INFO  : PREHOOK: Input: file:/!!ELIDED!!
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Starting task [Stage-0:MOVE] in serial mode
-INFO  : Loading data to table smb_mapjoin_1.smb_bucket_2 from file:/!!ELIDED!!
-INFO  : Starting task [Stage-1:STATS] in serial mode
-INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
-INFO  : POSTHOOK: type: LOAD
-INFO  : POSTHOOK: Input: file:/!!ELIDED!!
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_2
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
-No rows affected 
->>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
-INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
-INFO  : PREHOOK: type: LOAD
-INFO  : PREHOOK: Input: file:/!!ELIDED!!
-INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_3
-INFO  : Starting task [Stage-0:MOVE] in serial mode
-INFO  : Loading data to table smb_mapjoin_1.smb_bucket_3 from file:/!!ELIDED!!
-INFO  : Starting task [Stage-1:STATS] in serial mode
-INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
-INFO  : POSTHOOK: type: LOAD
-INFO  : POSTHOOK: Input: file:/!!ELIDED!!
-INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_3
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
-No rows affected 
->>>  
->>>  set hive.cbo.enable=false;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  -- SORT_QUERY_RESULTS
->>>  
->>>  explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: b'
-'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
-'            Filter Operator'
-'              predicate: key is not null (type: boolean)'
-'              Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
-'              Sorted Merge Bucket Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                keys:'
-'                  0 key (type: int)'
-'                  1 key (type: int)'
-'                outputColumnNames: _col0, _col1, _col5, _col6'
-'                Select Operator'
-'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-37 rows selected 
->>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-No rows selected 
->>>  
->>>  explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+              Sorted Merge Bucket Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: b'
-'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'1','val_1','NULL','NULL'
-'3','val_3','NULL','NULL'
-'4','val_4','NULL','NULL'
-'5','val_5','NULL','NULL'
-'10','val_10','NULL','NULL'
-5 rows selected 
->>>  
->>>  explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+1	val_1	NULL	NULL
+3	val_3	NULL	NULL
+4	val_4	NULL	NULL
+5	val_5	NULL	NULL
+10	val_10	NULL	NULL
+PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: b'
-'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'NULL','NULL','20','val_20'
-'NULL','NULL','23','val_23'
-'NULL','NULL','25','val_25'
-'NULL','NULL','30','val_30'
-4 rows selected 
->>>  
->>>  explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Right Outer Join0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+NULL	NULL	20	val_20
+NULL	NULL	23	val_23
+NULL	NULL	25	val_25
+NULL	NULL	30	val_30
+PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: b'
-'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Outer Join 0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'1','val_1','NULL','NULL'
-'3','val_3','NULL','NULL'
-'4','val_4','NULL','NULL'
-'5','val_5','NULL','NULL'
-'10','val_10','NULL','NULL'
-'NULL','NULL','20','val_20'
-'NULL','NULL','23','val_23'
-'NULL','NULL','25','val_25'
-'NULL','NULL','30','val_30'
-9 rows selected 
->>>  
->>>  
->>>  explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Outer Join 0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+1	val_1	NULL	NULL
+3	val_3	NULL	NULL
+4	val_4	NULL	NULL
+5	val_5	NULL	NULL
+10	val_10	NULL	NULL
+NULL	NULL	20	val_20
+NULL	NULL	23	val_23
+NULL	NULL	25	val_25
+NULL	NULL	30	val_30
+PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: a'
-'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
-'            Filter Operator'
-'              predicate: key is not null (type: boolean)'
-'              Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
-'              Sorted Merge Bucket Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                keys:'
-'                  0 key (type: int)'
-'                  1 key (type: int)'
-'                outputColumnNames: _col0, _col1, _col5, _col6'
-'                Select Operator'
-'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-37 rows selected 
->>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-No rows selected 
->>>  
->>>  explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+              Sorted Merge Bucket Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: a'
-'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'1','val_1','NULL','NULL'
-'3','val_3','NULL','NULL'
-'4','val_4','NULL','NULL'
-'5','val_5','NULL','NULL'
-'10','val_10','NULL','NULL'
-5 rows selected 
->>>  
->>>  explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+1	val_1	NULL	NULL
+3	val_3	NULL	NULL
+4	val_4	NULL	NULL
+5	val_5	NULL	NULL
+10	val_10	NULL	NULL
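Every b-side column above is NULL because the two tables hold disjoint key sets ({1, 3, 4, 5, 10} vs {20, 23, 25, 30}), so no row of smb_bucket_1 finds a match. A quick check that makes this explicit (not part of the recorded output):

    -- returns 0: the inner (matching) part of the outer join is empty
    select count(*) from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;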
+PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: a'
-'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'NULL','NULL','20','val_20'
-'NULL','NULL','23','val_23'
-'NULL','NULL','25','val_25'
-'NULL','NULL','30','val_30'
-4 rows selected 
->>>  
->>>  explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
-INFO  : POSTHOOK: query: explain
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Right Outer Join0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+NULL	NULL	20	val_20
+NULL	NULL	23	val_23
+NULL	NULL	25	val_25
+NULL	NULL	30	val_30
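The right outer join is the mirror image: with no matching keys it simply returns every smb_bucket_2 row padded with NULLs on the a side. Under that test-specific assumption (and only then) it is equivalent to the hypothetical rewrite below:

    select cast(null as int) as a_key, cast(null as string) as a_value, b.key, b.value
    from smb_bucket_2 b;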
+PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query explain
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-'Explain'
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Map Operator Tree:'
-'          TableScan'
-'            alias: a'
-'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Outer Join 0 to 1'
-'              keys:'
-'                0 key (type: int)'
-'                1 key (type: int)'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Select Operator'
-'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        ListSink'
-''
-34 rows selected 
->>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
-INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : Semantic Analysis Completed
-INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
-INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : PREHOOK: type: QUERY
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : PREHOOK: Output: file:/!!ELIDED!!
-WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
-INFO  : Query ID = !!{queryId}!!
-INFO  : Total jobs = 1
-INFO  : Launching Job 1 out of 1
-INFO  : Starting task [Stage-1:MAPRED] in serial mode
-INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
-DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
-DEBUG : adding the following namenodes' delegation tokens:[file:///]
-DEBUG : Creating splits at file:/!!ELIDED!!
-INFO  : number of splits:1
-INFO  : Submitting tokens for job: !!{jobId}}!!
-INFO  : The url to track the job: http://localhost:8080/
-INFO  : Job running in-process (local Hadoop)
-INFO  : Ended Job = !!{jobId}!!
-INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-INFO  : POSTHOOK: type: QUERY
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
-INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
-INFO  : POSTHOOK: Output: file:/!!ELIDED!!
-INFO  : MapReduce Jobs Launched: 
-INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
-INFO  : Total MapReduce CPU Time Spent: 0 msec
-INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : OK
-DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
-'a.key','a.value','b.key','b.value'
-'1','val_1','NULL','NULL'
-'3','val_3','NULL','NULL'
-'4','val_4','NULL','NULL'
-'5','val_5','NULL','NULL'
-'10','val_10','NULL','NULL'
-'NULL','NULL','20','val_20'
-'NULL','NULL','23','val_23'
-'NULL','NULL','25','val_25'
-'NULL','NULL','30','val_30'
-9 rows selected 
->>>  
->>>   
->>>  
->>>  
->>>  
->>>  !record
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+            Sorted Merge Bucket Map Join Operator
+              condition map:
+                   Outer Join 0 to 1
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+              outputColumnNames: _col0, _col1, _col5, _col6
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+#### A masked pattern was here ####
+1	val_1	NULL	NULL
+3	val_3	NULL	NULL
+4	val_4	NULL	NULL
+5	val_5	NULL	NULL
+10	val_10	NULL	NULL
+NULL	NULL	20	val_20
+NULL	NULL	23	val_23
+NULL	NULL	25	val_25
+NULL	NULL	30	val_30
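Because the key sets never intersect, the full outer join result above is exactly the left outer rows followed by the right outer rows. A sketch of that equivalence, valid only while the matched (inner) part stays empty; with overlapping keys the full outer join would not decompose this way:

    select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
    union all
    select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;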