Posted to commits@hive.apache.org by yc...@apache.org on 2017/04/10 14:42:05 UTC

[3/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files that use default database tables (Peter Vary via Yongzhi Chen)
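
For context, the golden files added below come from qtests that run inside a per-test database (smb_mapjoin_16 and smb_mapjoin_2 in this commit) while reading the shared default.src table; running such cross-database qtests is what HIVE-16345 enables in BeeLineDriver. A minimal sketch of such a .q file, assembled only from statements that appear in the outputs below (illustrative, not an exact copy of any test in this commit):

    set hive.cbo.enable=false;
    -- assumes the driver has already switched to a per-test database
    -- (smb_mapjoin_16 in the first golden file below)
    CREATE TABLE test_table1 (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    -- the qualified reference to default.src is the cross-database case being exercised
    INSERT OVERWRITE TABLE test_table1 SELECT * FROM default.src;
    SELECT count(*) FROM test_table1;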

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
new file mode 100644
index 0000000..b8a06dc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
@@ -0,0 +1,254 @@
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  set hive.merge.mapfiles=false;
+No rows affected 
+>>>  set hive.merge.mapredfiles=false; 
+No rows affected 
+>>>  
+>>>  -- Create bucketed and sorted tables
+>>>  CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_16
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_16
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_16
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_16
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  
+>>>  FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : PREHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table2
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 2
+INFO  : Launching Job 1 out of 2
+INFO  : Starting task [Stage-2:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_16.test_table1 from file:/!!ELIDED!!
+INFO  : Launching Job 2 out of 2
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-3:STATS] in serial mode
+INFO  : Starting task [Stage-1:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_16.test_table2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-5:STATS] in serial mode
+INFO  : POSTHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-2:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+No rows affected 
+>>>  
+>>>  -- Mapjoin followed by an aggregation should be performed in a single MR job
+>>>  EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: query: EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                Group By Operator'
+'                  aggregations: count()'
+'                  mode: hash'
+'                  outputColumnNames: _col0'
+'                  Reduce Output Operator'
+'                    sort order: '
+'                    value expressions: _col0 (type: bigint)'
+'      Reduce Operator Tree:'
+'        Group By Operator'
+'          aggregations: count(VALUE._col0)'
+'          mode: mergepartial'
+'          outputColumnNames: _col0'
+'          File Output Operator'
+'            compressed: false'
+'            table:'
+'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+45 rows selected 
+>>>  SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c1, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_16@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_16@test_table2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_16@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_16@test_table2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+'_c1'
+'1028'
+1 row selected 
+>>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
new file mode 100644
index 0000000..22a2d6a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
@@ -0,0 +1,955 @@
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+No rows affected 
+>>>   
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  -- SORT_QUERY_RESULTS
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+9 rows selected 
+>>>  
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+9 rows selected 
+>>>  
+>>>   
+>>>  
+>>>  
+>>>  
+>>>  !record