Posted to commits@hive.apache.org by yc...@apache.org on 2017/04/10 14:42:04 UTC

[2/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)
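
For readers skimming the diff: smb_mapjoin_3.q.out is the expected BeeLine output for
the smb_mapjoin_3.q test, which exercises sorted-merge-bucket (SMB) map joins. The
!!{queryId}!!, !!{jobId}!! and !!ELIDED!! tokens in the expected output are masking
placeholders substituted by the test infrastructure, not literal values. A condensed
HiveQL sketch of what the q file runs (every statement below is taken verbatim from
the output that follows; only the condensing and the comments are mine):

    -- Bucketed, sorted RCFile tables, so the SMB join optimization can apply
    create table smb_bucket_2(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
    create table smb_bucket_3(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
    load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
    load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;

    -- Enable sorted-merge bucket map joins
    set hive.optimize.bucketmapjoin = true;
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

    -- Inner, left outer, right outer and full outer joins are each explained
    -- and executed, with the map-join hint placed on either side; the plans
    -- all show a Sorted Merge Bucket Map Join Operator.
    explain select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
    select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;

Note that the PREHOOK/POSTHOOK lines reference a per-test database (smb_mapjoin_3)
rather than default, which appears to be how this change lets qtest files written
against default-database tables run under the BeeLineDriver.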

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
new file mode 100644
index 0000000..6c9b8e4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
@@ -0,0 +1,950 @@
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  -- SORT_QUERY_RESULTS
+>>>  
+>>>  
+>>>  
+>>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+No rows affected 
+>>>  
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>   
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+8 rows selected 
+>>>  
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+8 rows selected 
+>>>  
+>>>   
+>>>  
+>>>  
+>>>  
+>>>  !record