Posted to commits@hive.apache.org by yc...@apache.org on 2017/04/10 14:42:03 UTC

[1/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Repository: hive
Updated Branches:
  refs/heads/master 392b6e320 -> a2ce7f3d2


http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
new file mode 100644
index 0000000..b15c951
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
@@ -0,0 +1,1805 @@
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  set hive.mapred.mode=nonstrict;
+No rows affected 
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  
+>>>  
+>>>  CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_7
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_7
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  
+>>>  
+>>>  CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_7
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_7
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  create table smb_join_results(k1 int, v1 string, k2 int, v2 string);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: query: create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_7
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_7
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+No rows affected 
+>>>  create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: query: create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_7
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string)
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_7
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string)
+No rows affected 
+>>>  create table normal_join_results(k1 int, v1 string, k2 int, v2 string);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_7
+INFO  : PREHOOK: Output: smb_mapjoin_7@normal_join_results
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_7
+INFO  : POSTHOOK: Output: smb_mapjoin_7@normal_join_results
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_bucket4_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1
+No rows affected 
+>>>  load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_bucket4_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1
+No rows affected 
+>>>  
+>>>  insert overwrite table smb_bucket4_2
+select * from default.src;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): insert overwrite table smb_bucket4_2
+select * from default.src
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): insert overwrite table smb_bucket4_2
+select * from default.src
+INFO  : PREHOOK: query: insert overwrite table smb_bucket4_2
+select * from default.src
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_bucket4_2
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_bucket4_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: insert overwrite table smb_bucket4_2
+select * from default.src
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_bucket4_2
+INFO  : POSTHOOK: Lineage: smb_bucket4_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query insert overwrite table smb_bucket4_2
+select * from default.src
+No rows affected 
+>>>  
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: query: insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 3
+INFO  : Launching Job 1 out of 3
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-7:CONDITIONAL] in serial mode
+INFO  : Stage-4 is filtered out by condition resolver.
+INFO  : Stage-3 is selected by condition resolver.
+INFO  : Stage-5 is filtered out by condition resolver.
+INFO  : Launching Job 3 out of 3
+INFO  : Starting task [Stage-3:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_join_results_empty_bigtable from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-3:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+No rows affected 
+>>>  
+>>>  insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: query: insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 3
+INFO  : Launching Job 1 out of 3
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-7:CONDITIONAL] in serial mode
+INFO  : Stage-4 is filtered out by condition resolver.
+INFO  : Stage-3 is selected by condition resolver.
+INFO  : Stage-5 is filtered out by condition resolver.
+INFO  : Launching Job 3 out of 3
+INFO  : Starting task [Stage-3:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_join_results_empty_bigtable from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results_empty_bigtable.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-3:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query insert overwrite table smb_join_results_empty_bigtable
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+No rows affected 
+>>>  
+>>>  select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:smb_join_results_empty_bigtable.k1, type:int, comment:null), FieldSchema(name:smb_join_results_empty_bigtable.v1, type:string, comment:null), FieldSchema(name:smb_join_results_empty_bigtable.k2, type:int, comment:null), FieldSchema(name:smb_join_results_empty_bigtable.v2, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2
+INFO  : PREHOOK: query: select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select * from smb_join_results_empty_bigtable order by k1, v1, k2, v2
+'smb_join_results_empty_bigtable.k1','smb_join_results_empty_bigtable.v1','smb_join_results_empty_bigtable.k2','smb_join_results_empty_bigtable.v2'
+'NULL','NULL','0','val_0'
+'NULL','NULL','0','val_0'
+'NULL','NULL','0','val_0'
+'NULL','NULL','2','val_2'
+'NULL','NULL','4','val_4'
+'NULL','NULL','5','val_5'
+'NULL','NULL','5','val_5'
+'NULL','NULL','5','val_5'
+'NULL','NULL','8','val_8'
+'NULL','NULL','9','val_9'
+'NULL','NULL','10','val_10'
+'NULL','NULL','11','val_11'
+'NULL','NULL','12','val_12'
+'NULL','NULL','12','val_12'
+'NULL','NULL','15','val_15'
+'NULL','NULL','15','val_15'
+'NULL','NULL','17','val_17'
+'NULL','NULL','18','val_18'
+'NULL','NULL','18','val_18'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','24','val_24'
+'NULL','NULL','24','val_24'
+'NULL','NULL','26','val_26'
+'NULL','NULL','26','val_26'
+'NULL','NULL','27','val_27'
+'NULL','NULL','28','val_28'
+'NULL','NULL','30','val_30'
+'NULL','NULL','33','val_33'
+'NULL','NULL','34','val_34'
+'NULL','NULL','35','val_35'
+'NULL','NULL','35','val_35'
+'NULL','NULL','35','val_35'
+'NULL','NULL','37','val_37'
+'NULL','NULL','37','val_37'
+'NULL','NULL','41','val_41'
+'NULL','NULL','42','val_42'
+'NULL','NULL','42','val_42'
+'NULL','NULL','43','val_43'
+'NULL','NULL','44','val_44'
+'NULL','NULL','47','val_47'
+'NULL','NULL','51','val_51'
+'NULL','NULL','51','val_51'
+'NULL','NULL','53','val_53'
+'NULL','NULL','54','val_54'
+'NULL','NULL','57','val_57'
+'NULL','NULL','58','val_58'
+'NULL','NULL','58','val_58'
+'NULL','NULL','64','val_64'
+'NULL','NULL','65','val_65'
+'NULL','NULL','66','val_66'
+'NULL','NULL','67','val_67'
+'NULL','NULL','67','val_67'
+'NULL','NULL','69','val_69'
+'NULL','NULL','70','val_70'
+'NULL','NULL','70','val_70'
+'NULL','NULL','70','val_70'
+'NULL','NULL','72','val_72'
+'NULL','NULL','72','val_72'
+'NULL','NULL','74','val_74'
+'NULL','NULL','76','val_76'
+'NULL','NULL','76','val_76'
+'NULL','NULL','77','val_77'
+'NULL','NULL','78','val_78'
+'NULL','NULL','80','val_80'
+'NULL','NULL','82','val_82'
+'NULL','NULL','83','val_83'
+'NULL','NULL','83','val_83'
+'NULL','NULL','84','val_84'
+'NULL','NULL','84','val_84'
+'NULL','NULL','85','val_85'
+'NULL','NULL','86','val_86'
+'NULL','NULL','87','val_87'
+'NULL','NULL','90','val_90'
+'NULL','NULL','90','val_90'
+'NULL','NULL','90','val_90'
+'NULL','NULL','92','val_92'
+'NULL','NULL','95','val_95'
+'NULL','NULL','95','val_95'
+'NULL','NULL','96','val_96'
+'NULL','NULL','97','val_97'
+'NULL','NULL','97','val_97'
+'NULL','NULL','98','val_98'
+'NULL','NULL','98','val_98'
+'NULL','NULL','100','val_100'
+'NULL','NULL','100','val_100'
+'NULL','NULL','103','val_103'
+'NULL','NULL','103','val_103'
+'NULL','NULL','104','val_104'
+'NULL','NULL','104','val_104'
+'NULL','NULL','105','val_105'
+'NULL','NULL','111','val_111'
+'NULL','NULL','113','val_113'
+'NULL','NULL','113','val_113'
+'NULL','NULL','114','val_114'
+'NULL','NULL','116','val_116'
+'NULL','NULL','118','val_118'
+'NULL','NULL','118','val_118'
+'NULL','NULL','119','val_119'
+'NULL','NULL','119','val_119'
+'NULL','NULL','119','val_119'
+'NULL','NULL','120','val_120'
+'NULL','NULL','120','val_120'
+'NULL','NULL','125','val_125'
+'NULL','NULL','125','val_125'
+'NULL','NULL','126','val_126'
+'NULL','NULL','128','val_128'
+'NULL','NULL','128','val_128'
+'NULL','NULL','128','val_128'
+'NULL','NULL','129','val_129'
+'NULL','NULL','129','val_129'
+'NULL','NULL','131','val_131'
+'NULL','NULL','133','val_133'
+'NULL','NULL','134','val_134'
+'NULL','NULL','134','val_134'
+'NULL','NULL','136','val_136'
+'NULL','NULL','137','val_137'
+'NULL','NULL','137','val_137'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','143','val_143'
+'NULL','NULL','145','val_145'
+'NULL','NULL','146','val_146'
+'NULL','NULL','146','val_146'
+'NULL','NULL','149','val_149'
+'NULL','NULL','149','val_149'
+'NULL','NULL','150','val_150'
+'NULL','NULL','152','val_152'
+'NULL','NULL','152','val_152'
+'NULL','NULL','153','val_153'
+'NULL','NULL','155','val_155'
+'NULL','NULL','156','val_156'
+'NULL','NULL','157','val_157'
+'NULL','NULL','158','val_158'
+'NULL','NULL','160','val_160'
+'NULL','NULL','162','val_162'
+'NULL','NULL','163','val_163'
+'NULL','NULL','164','val_164'
+'NULL','NULL','164','val_164'
+'NULL','NULL','165','val_165'
+'NULL','NULL','165','val_165'
+'NULL','NULL','166','val_166'
+'NULL','NULL','167','val_167'
+'NULL','NULL','167','val_167'
+'NULL','NULL','167','val_167'
+'NULL','NULL','168','val_168'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','170','val_170'
+'NULL','NULL','172','val_172'
+'NULL','NULL','172','val_172'
+'NULL','NULL','174','val_174'
+'NULL','NULL','174','val_174'
+'NULL','NULL','175','val_175'
+'NULL','NULL','175','val_175'
+'NULL','NULL','176','val_176'
+'NULL','NULL','176','val_176'
+'NULL','NULL','177','val_177'
+'NULL','NULL','178','val_178'
+'NULL','NULL','179','val_179'
+'NULL','NULL','179','val_179'
+'NULL','NULL','180','val_180'
+'NULL','NULL','181','val_181'
+'NULL','NULL','183','val_183'
+'NULL','NULL','186','val_186'
+'NULL','NULL','187','val_187'
+'NULL','NULL','187','val_187'
+'NULL','NULL','187','val_187'
+'NULL','NULL','189','val_189'
+'NULL','NULL','190','val_190'
+'NULL','NULL','191','val_191'
+'NULL','NULL','191','val_191'
+'NULL','NULL','192','val_192'
+'NULL','NULL','193','val_193'
+'NULL','NULL','193','val_193'
+'NULL','NULL','193','val_193'
+'NULL','NULL','194','val_194'
+'NULL','NULL','195','val_195'
+'NULL','NULL','195','val_195'
+'NULL','NULL','196','val_196'
+'NULL','NULL','197','val_197'
+'NULL','NULL','197','val_197'
+'NULL','NULL','199','val_199'
+'NULL','NULL','199','val_199'
+'NULL','NULL','199','val_199'
+'NULL','NULL','200','val_200'
+'NULL','NULL','200','val_200'
+'NULL','NULL','201','val_201'
+'NULL','NULL','202','val_202'
+'NULL','NULL','203','val_203'
+'NULL','NULL','203','val_203'
+'NULL','NULL','205','val_205'
+'NULL','NULL','205','val_205'
+'NULL','NULL','207','val_207'
+'NULL','NULL','207','val_207'
+'NULL','NULL','208','val_208'
+'NULL','NULL','208','val_208'
+'NULL','NULL','208','val_208'
+'NULL','NULL','209','val_209'
+'NULL','NULL','209','val_209'
+'NULL','NULL','213','val_213'
+'NULL','NULL','213','val_213'
+'NULL','NULL','214','val_214'
+'NULL','NULL','216','val_216'
+'NULL','NULL','216','val_216'
+'NULL','NULL','217','val_217'
+'NULL','NULL','217','val_217'
+'NULL','NULL','218','val_218'
+'NULL','NULL','219','val_219'
+'NULL','NULL','219','val_219'
+'NULL','NULL','221','val_221'
+'NULL','NULL','221','val_221'
+'NULL','NULL','222','val_222'
+'NULL','NULL','223','val_223'
+'NULL','NULL','223','val_223'
+'NULL','NULL','224','val_224'
+'NULL','NULL','224','val_224'
+'NULL','NULL','226','val_226'
+'NULL','NULL','228','val_228'
+'NULL','NULL','229','val_229'
+'NULL','NULL','229','val_229'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','233','val_233'
+'NULL','NULL','233','val_233'
+'NULL','NULL','235','val_235'
+'NULL','NULL','237','val_237'
+'NULL','NULL','237','val_237'
+'NULL','NULL','238','val_238'
+'NULL','NULL','238','val_238'
+'NULL','NULL','239','val_239'
+'NULL','NULL','239','val_239'
+'NULL','NULL','241','val_241'
+'NULL','NULL','242','val_242'
+'NULL','NULL','242','val_242'
+'NULL','NULL','244','val_244'
+'NULL','NULL','247','val_247'
+'NULL','NULL','248','val_248'
+'NULL','NULL','249','val_249'
+'NULL','NULL','252','val_252'
+'NULL','NULL','255','val_255'
+'NULL','NULL','255','val_255'
+'NULL','NULL','256','val_256'
+'NULL','NULL','256','val_256'
+'NULL','NULL','257','val_257'
+'NULL','NULL','258','val_258'
+'NULL','NULL','260','val_260'
+'NULL','NULL','262','val_262'
+'NULL','NULL','263','val_263'
+'NULL','NULL','265','val_265'
+'NULL','NULL','265','val_265'
+'NULL','NULL','266','val_266'
+'NULL','NULL','272','val_272'
+'NULL','NULL','272','val_272'
+'NULL','NULL','273','val_273'
+'NULL','NULL','273','val_273'
+'NULL','NULL','273','val_273'
+'NULL','NULL','274','val_274'
+'NULL','NULL','275','val_275'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','278','val_278'
+'NULL','NULL','278','val_278'
+'NULL','NULL','280','val_280'
+'NULL','NULL','280','val_280'
+'NULL','NULL','281','val_281'
+'NULL','NULL','281','val_281'
+'NULL','NULL','282','val_282'
+'NULL','NULL','282','val_282'
+'NULL','NULL','283','val_283'
+'NULL','NULL','284','val_284'
+'NULL','NULL','285','val_285'
+'NULL','NULL','286','val_286'
+'NULL','NULL','287','val_287'
+'NULL','NULL','288','val_288'
+'NULL','NULL','288','val_288'
+'NULL','NULL','289','val_289'
+'NULL','NULL','291','val_291'
+'NULL','NULL','292','val_292'
+'NULL','NULL','296','val_296'
+'NULL','NULL','298','val_298'
+'NULL','NULL','298','val_298'
+'NULL','NULL','298','val_298'
+'NULL','NULL','302','val_302'
+'NULL','NULL','305','val_305'
+'NULL','NULL','306','val_306'
+'NULL','NULL','307','val_307'
+'NULL','NULL','307','val_307'
+'NULL','NULL','308','val_308'
+'NULL','NULL','309','val_309'
+'NULL','NULL','309','val_309'
+'NULL','NULL','310','val_310'
+'NULL','NULL','311','val_311'
+'NULL','NULL','311','val_311'
+'NULL','NULL','311','val_311'
+'NULL','NULL','315','val_315'
+'NULL','NULL','316','val_316'
+'NULL','NULL','316','val_316'
+'NULL','NULL','316','val_316'
+'NULL','NULL','317','val_317'
+'NULL','NULL','317','val_317'
+'NULL','NULL','318','val_318'
+'NULL','NULL','318','val_318'
+'NULL','NULL','318','val_318'
+'NULL','NULL','321','val_321'
+'NULL','NULL','321','val_321'
+'NULL','NULL','322','val_322'
+'NULL','NULL','322','val_322'
+'NULL','NULL','323','val_323'
+'NULL','NULL','325','val_325'
+'NULL','NULL','325','val_325'
+'NULL','NULL','327','val_327'
+'NULL','NULL','327','val_327'
+'NULL','NULL','327','val_327'
+'NULL','NULL','331','val_331'
+'NULL','NULL','331','val_331'
+'NULL','NULL','332','val_332'
+'NULL','NULL','333','val_333'
+'NULL','NULL','333','val_333'
+'NULL','NULL','335','val_335'
+'NULL','NULL','336','val_336'
+'NULL','NULL','338','val_338'
+'NULL','NULL','339','val_339'
+'NULL','NULL','341','val_341'
+'NULL','NULL','342','val_342'
+'NULL','NULL','342','val_342'
+'NULL','NULL','344','val_344'
+'NULL','NULL','344','val_344'
+'NULL','NULL','345','val_345'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','351','val_351'
+'NULL','NULL','353','val_353'
+'NULL','NULL','353','val_353'
+'NULL','NULL','356','val_356'
+'NULL','NULL','360','val_360'
+'NULL','NULL','362','val_362'
+'NULL','NULL','364','val_364'
+'NULL','NULL','365','val_365'
+'NULL','NULL','366','val_366'
+'NULL','NULL','367','val_367'
+'NULL','NULL','367','val_367'
+'NULL','NULL','368','val_368'
+'NULL','NULL','369','val_369'
+'NULL','NULL','369','val_369'
+'NULL','NULL','369','val_369'
+'NULL','NULL','373','val_373'
+'NULL','NULL','374','val_374'
+'NULL','NULL','375','val_375'
+'NULL','NULL','377','val_377'
+'NULL','NULL','378','val_378'
+'NULL','NULL','379','val_379'
+'NULL','NULL','382','val_382'
+'NULL','NULL','382','val_382'
+'NULL','NULL','384','val_384'
+'NULL','NULL','384','val_384'
+'NULL','NULL','384','val_384'
+'NULL','NULL','386','val_386'
+'NULL','NULL','389','val_389'
+'NULL','NULL','392','val_392'
+'NULL','NULL','393','val_393'
+'NULL','NULL','394','val_394'
+'NULL','NULL','395','val_395'
+'NULL','NULL','395','val_395'
+'NULL','NULL','396','val_396'
+'NULL','NULL','396','val_396'
+'NULL','NULL','396','val_396'
+'NULL','NULL','397','val_397'
+'NULL','NULL','397','val_397'
+'NULL','NULL','399','val_399'
+'NULL','NULL','399','val_399'
+'NULL','NULL','400','val_400'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','402','val_402'
+'NULL','NULL','403','val_403'
+'NULL','NULL','403','val_403'
+'NULL','NULL','403','val_403'
+'NULL','NULL','404','val_404'
+'NULL','NULL','404','val_404'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','407','val_407'
+'NULL','NULL','409','val_409'
+'NULL','NULL','409','val_409'
+'NULL','NULL','409','val_409'
+'NULL','NULL','411','val_411'
+'NULL','NULL','413','val_413'
+'NULL','NULL','413','val_413'
+'NULL','NULL','414','val_414'
+'NULL','NULL','414','val_414'
+'NULL','NULL','417','val_417'
+'NULL','NULL','417','val_417'
+'NULL','NULL','417','val_417'
+'NULL','NULL','418','val_418'
+'NULL','NULL','419','val_419'
+'NULL','NULL','421','val_421'
+'NULL','NULL','424','val_424'
+'NULL','NULL','424','val_424'
+'NULL','NULL','427','val_427'
+'NULL','NULL','429','val_429'
+'NULL','NULL','429','val_429'
+'NULL','NULL','430','val_430'
+'NULL','NULL','430','val_430'
+'NULL','NULL','430','val_430'
+'NULL','NULL','431','val_431'
+'NULL','NULL','431','val_431'
+'NULL','NULL','431','val_431'
+'NULL','NULL','432','val_432'
+'NULL','NULL','435','val_435'
+'NULL','NULL','436','val_436'
+'NULL','NULL','437','val_437'
+'NULL','NULL','438','val_438'
+'NULL','NULL','438','val_438'
+'NULL','NULL','438','val_438'
+'NULL','NULL','439','val_439'
+'NULL','NULL','439','val_439'
+'NULL','NULL','443','val_443'
+'NULL','NULL','444','val_444'
+'NULL','NULL','446','val_446'
+'NULL','NULL','448','val_448'
+'NULL','NULL','449','val_449'
+'NULL','NULL','452','val_452'
+'NULL','NULL','453','val_453'
+'NULL','NULL','454','val_454'
+'NULL','NULL','454','val_454'
+'NULL','NULL','454','val_454'
+'NULL','NULL','455','val_455'
+'NULL','NULL','457','val_457'
+'NULL','NULL','458','val_458'
+'NULL','NULL','458','val_458'
+'NULL','NULL','459','val_459'
+'NULL','NULL','459','val_459'
+'NULL','NULL','460','val_460'
+'NULL','NULL','462','val_462'
+'NULL','NULL','462','val_462'
+'NULL','NULL','463','val_463'
+'NULL','NULL','463','val_463'
+'NULL','NULL','466','val_466'
+'NULL','NULL','466','val_466'
+'NULL','NULL','466','val_466'
+'NULL','NULL','467','val_467'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','470','val_470'
+'NULL','NULL','472','val_472'
+'NULL','NULL','475','val_475'
+'NULL','NULL','477','val_477'
+'NULL','NULL','478','val_478'
+'NULL','NULL','478','val_478'
+'NULL','NULL','479','val_479'
+'NULL','NULL','480','val_480'
+'NULL','NULL','480','val_480'
+'NULL','NULL','480','val_480'
+'NULL','NULL','481','val_481'
+'NULL','NULL','482','val_482'
+'NULL','NULL','483','val_483'
+'NULL','NULL','484','val_484'
+'NULL','NULL','485','val_485'
+'NULL','NULL','487','val_487'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','490','val_490'
+'NULL','NULL','491','val_491'
+'NULL','NULL','492','val_492'
+'NULL','NULL','492','val_492'
+'NULL','NULL','493','val_493'
+'NULL','NULL','494','val_494'
+'NULL','NULL','495','val_495'
+'NULL','NULL','496','val_496'
+'NULL','NULL','497','val_497'
+'NULL','NULL','498','val_498'
+'NULL','NULL','498','val_498'
+'NULL','NULL','498','val_498'
+500 rows selected 
+>>>  
+>>>  explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-9:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6'
+'  Stage-5'
+'  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7'
+'  Stage-3 depends on stages: Stage-0'
+'  Stage-4'
+'  Stage-6'
+'  Stage-7 depends on stages: Stage-6'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.TextInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                      name: smb_mapjoin_7.smb_join_results'
+''
+'  Stage: Stage-8'
+'    Conditional Operator'
+''
+'  Stage: Stage-5'
+'    Move Operator'
+'      files:'
+'          hdfs directory: true'
+'          destination: file:/!!ELIDED!!
+''
+'  Stage: Stage-0'
+'    Move Operator'
+'      tables:'
+'          replace: true'
+'          table:'
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_7.smb_join_results'
+''
+'  Stage: Stage-3'
+'    Stats-Aggr Operator'
+''
+'  Stage: Stage-4'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            File Output Operator'
+'              compressed: false'
+'              table:'
+'                  input format: org.apache.hadoop.mapred.TextInputFormat'
+'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  name: smb_mapjoin_7.smb_join_results'
+''
+'  Stage: Stage-6'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            File Output Operator'
+'              compressed: false'
+'              table:'
+'                  input format: org.apache.hadoop.mapred.TextInputFormat'
+'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  name: smb_mapjoin_7.smb_join_results'
+''
+'  Stage: Stage-7'
+'    Move Operator'
+'      files:'
+'          hdfs directory: true'
+'          destination: file:/!!ELIDED!!
+''
+87 rows selected 
+>>>  
+>>>  insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : PREHOOK: Output: smb_mapjoin_7@smb_join_results
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 3
+INFO  : Launching Job 1 out of 3
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-8:CONDITIONAL] in serial mode
+INFO  : Stage-5 is filtered out by condition resolver.
+INFO  : Stage-4 is selected by condition resolver.
+INFO  : Stage-6 is filtered out by condition resolver.
+INFO  : Launching Job 3 out of 3
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.smb_join_results from file:/!!ELIDED!!
+INFO  : Starting task [Stage-3:STATS] in serial mode
+INFO  : POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : POSTHOOK: Output: smb_mapjoin_7@smb_join_results
+INFO  : POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+No rows affected 
+>>>  
+>>>  select * from smb_join_results order by k1, v1, k2, v2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select * from smb_join_results order by k1, v1, k2, v2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:smb_join_results.k1, type:int, comment:null), FieldSchema(name:smb_join_results.v1, type:string, comment:null), FieldSchema(name:smb_join_results.k2, type:int, comment:null), FieldSchema(name:smb_join_results.v2, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select * from smb_join_results order by k1, v1, k2, v2
+INFO  : PREHOOK: query: select * from smb_join_results order by k1, v1, k2, v2
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_join_results
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select * from smb_join_results order by k1, v1, k2, v2
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_join_results
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select * from smb_join_results order by k1, v1, k2, v2
+'smb_join_results.k1','smb_join_results.v1','smb_join_results.k2','smb_join_results.v2'
+'NULL','NULL','0','val_0'
+'NULL','NULL','0','val_0'
+'NULL','NULL','0','val_0'
+'NULL','NULL','2','val_2'
+'NULL','NULL','4','val_4'
+'NULL','NULL','5','val_5'
+'NULL','NULL','5','val_5'
+'NULL','NULL','5','val_5'
+'NULL','NULL','8','val_8'
+'NULL','NULL','9','val_9'
+'NULL','NULL','10','val_10'
+'NULL','NULL','11','val_11'
+'NULL','NULL','12','val_12'
+'NULL','NULL','12','val_12'
+'NULL','NULL','15','val_15'
+'NULL','NULL','15','val_15'
+'NULL','NULL','17','val_17'
+'NULL','NULL','18','val_18'
+'NULL','NULL','18','val_18'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','24','val_24'
+'NULL','NULL','24','val_24'
+'NULL','NULL','26','val_26'
+'NULL','NULL','26','val_26'
+'NULL','NULL','27','val_27'
+'NULL','NULL','28','val_28'
+'NULL','NULL','30','val_30'
+'NULL','NULL','33','val_33'
+'NULL','NULL','34','val_34'
+'NULL','NULL','35','val_35'
+'NULL','NULL','35','val_35'
+'NULL','NULL','35','val_35'
+'NULL','NULL','37','val_37'
+'NULL','NULL','37','val_37'
+'NULL','NULL','41','val_41'
+'NULL','NULL','42','val_42'
+'NULL','NULL','42','val_42'
+'NULL','NULL','43','val_43'
+'NULL','NULL','44','val_44'
+'NULL','NULL','47','val_47'
+'NULL','NULL','51','val_51'
+'NULL','NULL','51','val_51'
+'NULL','NULL','53','val_53'
+'NULL','NULL','54','val_54'
+'NULL','NULL','57','val_57'
+'NULL','NULL','58','val_58'
+'NULL','NULL','58','val_58'
+'NULL','NULL','64','val_64'
+'NULL','NULL','65','val_65'
+'NULL','NULL','66','val_66'
+'NULL','NULL','67','val_67'
+'NULL','NULL','67','val_67'
+'NULL','NULL','69','val_69'
+'NULL','NULL','70','val_70'
+'NULL','NULL','70','val_70'
+'NULL','NULL','70','val_70'
+'NULL','NULL','72','val_72'
+'NULL','NULL','72','val_72'
+'NULL','NULL','74','val_74'
+'NULL','NULL','76','val_76'
+'NULL','NULL','76','val_76'
+'NULL','NULL','77','val_77'
+'NULL','NULL','78','val_78'
+'NULL','NULL','80','val_80'
+'NULL','NULL','82','val_82'
+'NULL','NULL','83','val_83'
+'NULL','NULL','83','val_83'
+'NULL','NULL','84','val_84'
+'NULL','NULL','84','val_84'
+'NULL','NULL','85','val_85'
+'NULL','NULL','86','val_86'
+'NULL','NULL','87','val_87'
+'NULL','NULL','90','val_90'
+'NULL','NULL','90','val_90'
+'NULL','NULL','90','val_90'
+'NULL','NULL','92','val_92'
+'NULL','NULL','95','val_95'
+'NULL','NULL','95','val_95'
+'NULL','NULL','96','val_96'
+'NULL','NULL','97','val_97'
+'NULL','NULL','97','val_97'
+'NULL','NULL','98','val_98'
+'NULL','NULL','98','val_98'
+'NULL','NULL','100','val_100'
+'NULL','NULL','100','val_100'
+'NULL','NULL','103','val_103'
+'NULL','NULL','103','val_103'
+'NULL','NULL','104','val_104'
+'NULL','NULL','104','val_104'
+'NULL','NULL','105','val_105'
+'NULL','NULL','111','val_111'
+'NULL','NULL','113','val_113'
+'NULL','NULL','113','val_113'
+'NULL','NULL','114','val_114'
+'NULL','NULL','116','val_116'
+'NULL','NULL','118','val_118'
+'NULL','NULL','118','val_118'
+'NULL','NULL','119','val_119'
+'NULL','NULL','119','val_119'
+'NULL','NULL','119','val_119'
+'NULL','NULL','120','val_120'
+'NULL','NULL','120','val_120'
+'NULL','NULL','125','val_125'
+'NULL','NULL','125','val_125'
+'NULL','NULL','126','val_126'
+'NULL','NULL','128','val_128'
+'NULL','NULL','128','val_128'
+'NULL','NULL','128','val_128'
+'NULL','NULL','129','val_129'
+'NULL','NULL','129','val_129'
+'NULL','NULL','131','val_131'
+'NULL','NULL','133','val_133'
+'NULL','NULL','134','val_134'
+'NULL','NULL','134','val_134'
+'NULL','NULL','136','val_136'
+'NULL','NULL','137','val_137'
+'NULL','NULL','137','val_137'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','138','val_138'
+'NULL','NULL','143','val_143'
+'NULL','NULL','145','val_145'
+'NULL','NULL','146','val_146'
+'NULL','NULL','146','val_146'
+'NULL','NULL','149','val_149'
+'NULL','NULL','149','val_149'
+'NULL','NULL','150','val_150'
+'NULL','NULL','152','val_152'
+'NULL','NULL','152','val_152'
+'NULL','NULL','153','val_153'
+'NULL','NULL','155','val_155'
+'NULL','NULL','156','val_156'
+'NULL','NULL','157','val_157'
+'NULL','NULL','158','val_158'
+'NULL','NULL','160','val_160'
+'NULL','NULL','162','val_162'
+'NULL','NULL','163','val_163'
+'NULL','NULL','164','val_164'
+'NULL','NULL','164','val_164'
+'NULL','NULL','165','val_165'
+'NULL','NULL','165','val_165'
+'NULL','NULL','166','val_166'
+'NULL','NULL','167','val_167'
+'NULL','NULL','167','val_167'
+'NULL','NULL','167','val_167'
+'NULL','NULL','168','val_168'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','169','val_169'
+'NULL','NULL','170','val_170'
+'NULL','NULL','172','val_172'
+'NULL','NULL','172','val_172'
+'NULL','NULL','174','val_174'
+'NULL','NULL','174','val_174'
+'NULL','NULL','175','val_175'
+'NULL','NULL','175','val_175'
+'NULL','NULL','176','val_176'
+'NULL','NULL','176','val_176'
+'NULL','NULL','177','val_177'
+'NULL','NULL','178','val_178'
+'NULL','NULL','179','val_179'
+'NULL','NULL','179','val_179'
+'NULL','NULL','180','val_180'
+'NULL','NULL','181','val_181'
+'NULL','NULL','183','val_183'
+'NULL','NULL','186','val_186'
+'NULL','NULL','187','val_187'
+'NULL','NULL','187','val_187'
+'NULL','NULL','187','val_187'
+'NULL','NULL','189','val_189'
+'NULL','NULL','190','val_190'
+'NULL','NULL','191','val_191'
+'NULL','NULL','191','val_191'
+'NULL','NULL','192','val_192'
+'NULL','NULL','193','val_193'
+'NULL','NULL','193','val_193'
+'NULL','NULL','193','val_193'
+'NULL','NULL','194','val_194'
+'NULL','NULL','195','val_195'
+'NULL','NULL','195','val_195'
+'NULL','NULL','196','val_196'
+'NULL','NULL','197','val_197'
+'NULL','NULL','197','val_197'
+'NULL','NULL','199','val_199'
+'NULL','NULL','199','val_199'
+'NULL','NULL','199','val_199'
+'NULL','NULL','200','val_200'
+'NULL','NULL','200','val_200'
+'NULL','NULL','201','val_201'
+'NULL','NULL','202','val_202'
+'NULL','NULL','203','val_203'
+'NULL','NULL','203','val_203'
+'NULL','NULL','205','val_205'
+'NULL','NULL','205','val_205'
+'NULL','NULL','207','val_207'
+'NULL','NULL','207','val_207'
+'NULL','NULL','208','val_208'
+'NULL','NULL','208','val_208'
+'NULL','NULL','208','val_208'
+'NULL','NULL','209','val_209'
+'NULL','NULL','209','val_209'
+'NULL','NULL','213','val_213'
+'NULL','NULL','213','val_213'
+'NULL','NULL','214','val_214'
+'NULL','NULL','216','val_216'
+'NULL','NULL','216','val_216'
+'NULL','NULL','217','val_217'
+'NULL','NULL','217','val_217'
+'NULL','NULL','218','val_218'
+'NULL','NULL','219','val_219'
+'NULL','NULL','219','val_219'
+'NULL','NULL','221','val_221'
+'NULL','NULL','221','val_221'
+'NULL','NULL','222','val_222'
+'NULL','NULL','223','val_223'
+'NULL','NULL','223','val_223'
+'NULL','NULL','224','val_224'
+'NULL','NULL','224','val_224'
+'NULL','NULL','226','val_226'
+'NULL','NULL','228','val_228'
+'NULL','NULL','229','val_229'
+'NULL','NULL','229','val_229'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','230','val_230'
+'NULL','NULL','233','val_233'
+'NULL','NULL','233','val_233'
+'NULL','NULL','235','val_235'
+'NULL','NULL','237','val_237'
+'NULL','NULL','237','val_237'
+'NULL','NULL','238','val_238'
+'NULL','NULL','238','val_238'
+'NULL','NULL','239','val_239'
+'NULL','NULL','239','val_239'
+'NULL','NULL','241','val_241'
+'NULL','NULL','242','val_242'
+'NULL','NULL','242','val_242'
+'NULL','NULL','244','val_244'
+'NULL','NULL','247','val_247'
+'NULL','NULL','248','val_248'
+'NULL','NULL','249','val_249'
+'NULL','NULL','252','val_252'
+'NULL','NULL','255','val_255'
+'NULL','NULL','255','val_255'
+'NULL','NULL','256','val_256'
+'NULL','NULL','256','val_256'
+'NULL','NULL','257','val_257'
+'NULL','NULL','258','val_258'
+'NULL','NULL','260','val_260'
+'NULL','NULL','262','val_262'
+'NULL','NULL','263','val_263'
+'NULL','NULL','265','val_265'
+'NULL','NULL','265','val_265'
+'NULL','NULL','266','val_266'
+'NULL','NULL','272','val_272'
+'NULL','NULL','272','val_272'
+'NULL','NULL','273','val_273'
+'NULL','NULL','273','val_273'
+'NULL','NULL','273','val_273'
+'NULL','NULL','274','val_274'
+'NULL','NULL','275','val_275'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','277','val_277'
+'NULL','NULL','278','val_278'
+'NULL','NULL','278','val_278'
+'NULL','NULL','280','val_280'
+'NULL','NULL','280','val_280'
+'NULL','NULL','281','val_281'
+'NULL','NULL','281','val_281'
+'NULL','NULL','282','val_282'
+'NULL','NULL','282','val_282'
+'NULL','NULL','283','val_283'
+'NULL','NULL','284','val_284'
+'NULL','NULL','285','val_285'
+'NULL','NULL','286','val_286'
+'NULL','NULL','287','val_287'
+'NULL','NULL','288','val_288'
+'NULL','NULL','288','val_288'
+'NULL','NULL','289','val_289'
+'NULL','NULL','291','val_291'
+'NULL','NULL','292','val_292'
+'NULL','NULL','296','val_296'
+'NULL','NULL','298','val_298'
+'NULL','NULL','298','val_298'
+'NULL','NULL','298','val_298'
+'NULL','NULL','302','val_302'
+'NULL','NULL','305','val_305'
+'NULL','NULL','306','val_306'
+'NULL','NULL','307','val_307'
+'NULL','NULL','307','val_307'
+'NULL','NULL','308','val_308'
+'NULL','NULL','309','val_309'
+'NULL','NULL','309','val_309'
+'NULL','NULL','310','val_310'
+'NULL','NULL','311','val_311'
+'NULL','NULL','311','val_311'
+'NULL','NULL','311','val_311'
+'NULL','NULL','315','val_315'
+'NULL','NULL','316','val_316'
+'NULL','NULL','316','val_316'
+'NULL','NULL','316','val_316'
+'NULL','NULL','317','val_317'
+'NULL','NULL','317','val_317'
+'NULL','NULL','318','val_318'
+'NULL','NULL','318','val_318'
+'NULL','NULL','318','val_318'
+'NULL','NULL','321','val_321'
+'NULL','NULL','321','val_321'
+'NULL','NULL','322','val_322'
+'NULL','NULL','322','val_322'
+'NULL','NULL','323','val_323'
+'NULL','NULL','325','val_325'
+'NULL','NULL','325','val_325'
+'NULL','NULL','327','val_327'
+'NULL','NULL','327','val_327'
+'NULL','NULL','327','val_327'
+'NULL','NULL','331','val_331'
+'NULL','NULL','331','val_331'
+'NULL','NULL','332','val_332'
+'NULL','NULL','333','val_333'
+'NULL','NULL','333','val_333'
+'NULL','NULL','335','val_335'
+'NULL','NULL','336','val_336'
+'NULL','NULL','338','val_338'
+'NULL','NULL','339','val_339'
+'NULL','NULL','341','val_341'
+'NULL','NULL','342','val_342'
+'NULL','NULL','342','val_342'
+'NULL','NULL','344','val_344'
+'NULL','NULL','344','val_344'
+'NULL','NULL','345','val_345'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','348','val_348'
+'NULL','NULL','351','val_351'
+'NULL','NULL','353','val_353'
+'NULL','NULL','353','val_353'
+'NULL','NULL','356','val_356'
+'NULL','NULL','360','val_360'
+'NULL','NULL','362','val_362'
+'NULL','NULL','364','val_364'
+'NULL','NULL','365','val_365'
+'NULL','NULL','366','val_366'
+'NULL','NULL','367','val_367'
+'NULL','NULL','367','val_367'
+'NULL','NULL','368','val_368'
+'NULL','NULL','369','val_369'
+'NULL','NULL','369','val_369'
+'NULL','NULL','369','val_369'
+'NULL','NULL','373','val_373'
+'NULL','NULL','374','val_374'
+'NULL','NULL','375','val_375'
+'NULL','NULL','377','val_377'
+'NULL','NULL','378','val_378'
+'NULL','NULL','379','val_379'
+'NULL','NULL','382','val_382'
+'NULL','NULL','382','val_382'
+'NULL','NULL','384','val_384'
+'NULL','NULL','384','val_384'
+'NULL','NULL','384','val_384'
+'NULL','NULL','386','val_386'
+'NULL','NULL','389','val_389'
+'NULL','NULL','392','val_392'
+'NULL','NULL','393','val_393'
+'NULL','NULL','394','val_394'
+'NULL','NULL','395','val_395'
+'NULL','NULL','395','val_395'
+'NULL','NULL','396','val_396'
+'NULL','NULL','396','val_396'
+'NULL','NULL','396','val_396'
+'NULL','NULL','397','val_397'
+'NULL','NULL','397','val_397'
+'NULL','NULL','399','val_399'
+'NULL','NULL','399','val_399'
+'NULL','NULL','400','val_400'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','401','val_401'
+'NULL','NULL','402','val_402'
+'NULL','NULL','403','val_403'
+'NULL','NULL','403','val_403'
+'NULL','NULL','403','val_403'
+'NULL','NULL','404','val_404'
+'NULL','NULL','404','val_404'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','406','val_406'
+'NULL','NULL','407','val_407'
+'NULL','NULL','409','val_409'
+'NULL','NULL','409','val_409'
+'NULL','NULL','409','val_409'
+'NULL','NULL','411','val_411'
+'NULL','NULL','413','val_413'
+'NULL','NULL','413','val_413'
+'NULL','NULL','414','val_414'
+'NULL','NULL','414','val_414'
+'NULL','NULL','417','val_417'
+'NULL','NULL','417','val_417'
+'NULL','NULL','417','val_417'
+'NULL','NULL','418','val_418'
+'NULL','NULL','419','val_419'
+'NULL','NULL','421','val_421'
+'NULL','NULL','424','val_424'
+'NULL','NULL','424','val_424'
+'NULL','NULL','427','val_427'
+'NULL','NULL','429','val_429'
+'NULL','NULL','429','val_429'
+'NULL','NULL','430','val_430'
+'NULL','NULL','430','val_430'
+'NULL','NULL','430','val_430'
+'NULL','NULL','431','val_431'
+'NULL','NULL','431','val_431'
+'NULL','NULL','431','val_431'
+'NULL','NULL','432','val_432'
+'NULL','NULL','435','val_435'
+'NULL','NULL','436','val_436'
+'NULL','NULL','437','val_437'
+'NULL','NULL','438','val_438'
+'NULL','NULL','438','val_438'
+'NULL','NULL','438','val_438'
+'NULL','NULL','439','val_439'
+'NULL','NULL','439','val_439'
+'NULL','NULL','443','val_443'
+'NULL','NULL','444','val_444'
+'NULL','NULL','446','val_446'
+'NULL','NULL','448','val_448'
+'NULL','NULL','449','val_449'
+'NULL','NULL','452','val_452'
+'NULL','NULL','453','val_453'
+'NULL','NULL','454','val_454'
+'NULL','NULL','454','val_454'
+'NULL','NULL','454','val_454'
+'NULL','NULL','455','val_455'
+'NULL','NULL','457','val_457'
+'NULL','NULL','458','val_458'
+'NULL','NULL','458','val_458'
+'NULL','NULL','459','val_459'
+'NULL','NULL','459','val_459'
+'NULL','NULL','460','val_460'
+'NULL','NULL','462','val_462'
+'NULL','NULL','462','val_462'
+'NULL','NULL','463','val_463'
+'NULL','NULL','463','val_463'
+'NULL','NULL','466','val_466'
+'NULL','NULL','466','val_466'
+'NULL','NULL','466','val_466'
+'NULL','NULL','467','val_467'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','468','val_468'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','469','val_469'
+'NULL','NULL','470','val_470'
+'NULL','NULL','472','val_472'
+'NULL','NULL','475','val_475'
+'NULL','NULL','477','val_477'
+'NULL','NULL','478','val_478'
+'NULL','NULL','478','val_478'
+'NULL','NULL','479','val_479'
+'NULL','NULL','480','val_480'
+'NULL','NULL','480','val_480'
+'NULL','NULL','480','val_480'
+'NULL','NULL','481','val_481'
+'NULL','NULL','482','val_482'
+'NULL','NULL','483','val_483'
+'NULL','NULL','484','val_484'
+'NULL','NULL','485','val_485'
+'NULL','NULL','487','val_487'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','489','val_489'
+'NULL','NULL','490','val_490'
+'NULL','NULL','491','val_491'
+'NULL','NULL','492','val_492'
+'NULL','NULL','492','val_492'
+'NULL','NULL','493','val_493'
+'NULL','NULL','494','val_494'
+'NULL','NULL','495','val_495'
+'NULL','NULL','496','val_496'
+'NULL','NULL','497','val_497'
+'NULL','NULL','498','val_498'
+'NULL','NULL','498','val_498'
+'NULL','NULL','498','val_498'
+500 rows selected 
+>>>  
+>>>  insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : PREHOOK: Output: smb_mapjoin_7@normal_join_results
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks not specified. Estimated from input data size: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:4
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_7.normal_join_results from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_1
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_bucket4_2
+INFO  : POSTHOOK: Output: smb_mapjoin_7@normal_join_results
+INFO  : POSTHOOK: Lineage: normal_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: normal_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: normal_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : POSTHOOK: Lineage: normal_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query insert overwrite table normal_join_results select * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+No rows affected 
+>>>  
+>>>  select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:k1, type:bigint, comment:null), FieldSchema(name:k2, type:bigint, comment:null), FieldSchema(name:v1, type:bigint, comment:null), FieldSchema(name:v2, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+INFO  : PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@normal_join_results
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@normal_join_results
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+'k1','k2','v1','v2'
+'0','130091','0','36210398070'
+1 row selected 
+>>>  select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:k1, type:bigint, comment:null), FieldSchema(name:k2, type:bigint, comment:null), FieldSchema(name:v1, type:bigint, comment:null), FieldSchema(name:v2, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+INFO  : PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_join_results
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_join_results
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+'k1','k2','v1','v2'
+'0','130091','0','36210398070'
+1 row selected 
+>>>  select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:k1, type:bigint, comment:null), FieldSchema(name:k2, type:bigint, comment:null), FieldSchema(name:v1, type:bigint, comment:null), FieldSchema(name:v2, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable
+INFO  : PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_7@smb_join_results_empty_bigtable
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_empty_bigtable
+'k1','k2','v1','v2'
+'0','130091','0','36210398070'
+1 row selected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  !record
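(For context on the checksum queries recorded above: smb_join_results, smb_join_results_empty_bigtable and normal_join_results are expected to contain the same rows, so their order-independent sum(hash(...)) aggregates must agree — '0','130091','0','36210398070' in this run. A compact sketch of the same comparison, written against the tables this test creates but not part of the committed q file, could look like:

SELECT t.src, t.k1, t.k2, t.v1, t.v2
FROM (
  SELECT 'smb'    AS src, sum(hash(k1)) AS k1, sum(hash(k2)) AS k2,
         sum(hash(v1)) AS v1, sum(hash(v2)) AS v2
  FROM smb_join_results
  UNION ALL
  SELECT 'normal' AS src, sum(hash(k1)) AS k1, sum(hash(k2)) AS k2,
         sum(hash(v1)) AS v1, sum(hash(v2)) AS v2
  FROM normal_join_results
) t;

A mismatch between the two rows would indicate that the sort-merge-bucket map join path dropped or duplicated rows relative to the plain shuffle join.)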


[5/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
new file mode 100644
index 0000000..19c07a0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
@@ -0,0 +1,2563 @@
+>>>  set hive.mapred.mode=nonstrict;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  set hive.merge.mapfiles=false;
+No rows affected 
+>>>  set hive.merge.mapredfiles=false; 
+No rows affected 
+>>>  
+>>>  -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed
+>>>  
+>>>  -- Create two bucketed and sorted tables
+>>>  CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_11
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_11
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_11
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_11
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  
+>>>  FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INFO  : PREHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table1@ds=1
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table2@ds=1
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 2
+INFO  : Launching Job 1 out of 2
+INFO  : Starting task [Stage-2:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_11.test_table1 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Launching Job 2 out of 2
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-3:STATS] in serial mode
+INFO  : Starting task [Stage-1:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_11.test_table2 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-5:STATS] in serial mode
+INFO  : POSTHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table1@ds=1
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table2@ds=1
+INFO  : POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-2:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+No rows affected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  -- Create a bucketed table
+>>>  CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_11
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_11
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  
+>>>  -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced
+>>>  EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : PREHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-4:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+'  Stage-2 depends on stages: Stage-0'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col7'
+'                Position of Big Table: 0'
+'                BucketMapJoin: true'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col7 (type: string)'
+'                  outputColumnNames: _col0, _col1'
+'                  Reduce Output Operator'
+'                    null sort order: '
+'                    sort order: '
+'                    Map-reduce partition columns: _col0 (type: int)'
+'                    tag: -1'
+'                    value expressions: _col0 (type: int), _col1 (type: string)'
+'                    auto parallelism: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [a]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: ds=1'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            partition values:'
+'              ds 1'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_11.test_table1'
+'              numFiles 16'
+'              numRows 500'
+'              partition_columns ds'
+'              partition_columns.types string'
+'              rawDataSize 5312'
+'              serialization.ddl struct test_table1 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 5812'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_11.test_table1'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table1 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_11.test_table1'
+'            name: smb_mapjoin_11.test_table1'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_11.db/test_table1/ds=1 [a]'
+'      Needs Tagging: false'
+'      Reduce Operator Tree:'
+'        Select Operator'
+'          expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)'
+'          outputColumnNames: _col0, _col1'
+'          File Output Operator'
+'            compressed: false'
+'            GlobalTableId: 1'
+'            directory: file:/!!ELIDED!!
+'            NumFilesPerFileSink: 16'
+'            Static Partition Specification: ds=1/'
+'            Stats Publishing Key Prefix: file:/!!ELIDED!!
+'            table:'
+'                input format: org.apache.hadoop.mapred.TextInputFormat'
+'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                properties:'
+'                  bucket_count 16'
+'                  bucket_field_name key'
+'                  column.name.delimiter ,'
+'                  columns key,value'
+'                  columns.comments '
+'                  columns.types int:string'
+'                  file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                  file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                  location file:/!!ELIDED!!
+'                  name smb_mapjoin_11.test_table3'
+'                  partition_columns ds'
+'                  partition_columns.types string'
+'                  serialization.ddl struct test_table3 { i32 key, string value}'
+'                  serialization.format 1'
+'                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  transient_lastDdlTime !!UNIXTIME!!'
+'                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                name: smb_mapjoin_11.test_table3'
+'            TotalFiles: 16'
+'            GatherStats: true'
+'            MultiFileSpray: true'
+''
+'  Stage: Stage-0'
+'    Move Operator'
+'      tables:'
+'          partition:'
+'            ds 1'
+'          replace: true'
+'          source: file:/!!ELIDED!!
+'          table:'
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_11.test_table3'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_11.test_table3'
+''
+'  Stage: Stage-2'
+'    Stats-Aggr Operator'
+'      Stats Aggregation Key Prefix: file:/!!ELIDED!!
+''
+167 rows selected 
+>>>  
+>>>  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table2
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table2@ds=1
+INFO  : PREHOOK: Output: smb_mapjoin_11@test_table3@ds=1
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_11.test_table3 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table2
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table2@ds=1
+INFO  : POSTHOOK: Output: smb_mapjoin_11@test_table3@ds=1
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'
+No rows affected 
+>>>  
+>>>  SELECT * FROM test_table1 ORDER BY key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT * FROM test_table1 ORDER BY key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:test_table1.key, type:int, comment:null), FieldSchema(name:test_table1.value, type:string, comment:null), FieldSchema(name:test_table1.ds, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT * FROM test_table1 ORDER BY key
+INFO  : PREHOOK: query: SELECT * FROM test_table1 ORDER BY key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT * FROM test_table1 ORDER BY key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT * FROM test_table1 ORDER BY key
+'test_table1.key','test_table1.value','test_table1.ds'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'2','val_2','1'
+'4','val_4','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'8','val_8','1'
+'9','val_9','1'
+'10','val_10','1'
+'11','val_11','1'
+'12','val_12','1'
+'12','val_12','1'
+'15','val_15','1'
+'15','val_15','1'
+'17','val_17','1'
+'18','val_18','1'
+'18','val_18','1'
+'19','val_19','1'
+'20','val_20','1'
+'24','val_24','1'
+'24','val_24','1'
+'26','val_26','1'
+'26','val_26','1'
+'27','val_27','1'
+'28','val_28','1'
+'30','val_30','1'
+'33','val_33','1'
+'34','val_34','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'37','val_37','1'
+'37','val_37','1'
+'41','val_41','1'
+'42','val_42','1'
+'42','val_42','1'
+'43','val_43','1'
+'44','val_44','1'
+'47','val_47','1'
+'51','val_51','1'
+'51','val_51','1'
+'53','val_53','1'
+'54','val_54','1'
+'57','val_57','1'
+'58','val_58','1'
+'58','val_58','1'
+'64','val_64','1'
+'65','val_65','1'
+'66','val_66','1'
+'67','val_67','1'
+'67','val_67','1'
+'69','val_69','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'72','val_72','1'
+'72','val_72','1'
+'74','val_74','1'
+'76','val_76','1'
+'76','val_76','1'
+'77','val_77','1'
+'78','val_78','1'
+'80','val_80','1'
+'82','val_82','1'
+'83','val_83','1'
+'83','val_83','1'
+'84','val_84','1'
+'84','val_84','1'
+'85','val_85','1'
+'86','val_86','1'
+'87','val_87','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'92','val_92','1'
+'95','val_95','1'
+'95','val_95','1'
+'96','val_96','1'
+'97','val_97','1'
+'97','val_97','1'
+'98','val_98','1'
+'98','val_98','1'
+'100','val_100','1'
+'100','val_100','1'
+'103','val_103','1'
+'103','val_103','1'
+'104','val_104','1'
+'104','val_104','1'
+'105','val_105','1'
+'111','val_111','1'
+'113','val_113','1'
+'113','val_113','1'
+'114','val_114','1'
+'116','val_116','1'
+'118','val_118','1'
+'118','val_118','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'120','val_120','1'
+'120','val_120','1'
+'125','val_125','1'
+'125','val_125','1'
+'126','val_126','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'129','val_129','1'
+'129','val_129','1'
+'131','val_131','1'
+'133','val_133','1'
+'134','val_134','1'
+'134','val_134','1'
+'136','val_136','1'
+'137','val_137','1'
+'137','val_137','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'143','val_143','1'
+'145','val_145','1'
+'146','val_146','1'
+'146','val_146','1'
+'149','val_149','1'
+'149','val_149','1'
+'150','val_150','1'
+'152','val_152','1'
+'152','val_152','1'
+'153','val_153','1'
+'155','val_155','1'
+'156','val_156','1'
+'157','val_157','1'
+'158','val_158','1'
+'160','val_160','1'
+'162','val_162','1'
+'163','val_163','1'
+'164','val_164','1'
+'164','val_164','1'
+'165','val_165','1'
+'165','val_165','1'
+'166','val_166','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'168','val_168','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'170','val_170','1'
+'172','val_172','1'
+'172','val_172','1'
+'174','val_174','1'
+'174','val_174','1'
+'175','val_175','1'
+'175','val_175','1'
+'176','val_176','1'
+'176','val_176','1'
+'177','val_177','1'
+'178','val_178','1'
+'179','val_179','1'
+'179','val_179','1'
+'180','val_180','1'
+'181','val_181','1'
+'183','val_183','1'
+'186','val_186','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'189','val_189','1'
+'190','val_190','1'
+'191','val_191','1'
+'191','val_191','1'
+'192','val_192','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'194','val_194','1'
+'195','val_195','1'
+'195','val_195','1'
+'196','val_196','1'
+'197','val_197','1'
+'197','val_197','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'200','val_200','1'
+'200','val_200','1'
+'201','val_201','1'
+'202','val_202','1'
+'203','val_203','1'
+'203','val_203','1'
+'205','val_205','1'
+'205','val_205','1'
+'207','val_207','1'
+'207','val_207','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'209','val_209','1'
+'209','val_209','1'
+'213','val_213','1'
+'213','val_213','1'
+'214','val_214','1'
+'216','val_216','1'
+'216','val_216','1'
+'217','val_217','1'
+'217','val_217','1'
+'218','val_218','1'
+'219','val_219','1'
+'219','val_219','1'
+'221','val_221','1'
+'221','val_221','1'
+'222','val_222','1'
+'223','val_223','1'
+'223','val_223','1'
+'224','val_224','1'
+'224','val_224','1'
+'226','val_226','1'
+'228','val_228','1'
+'229','val_229','1'
+'229','val_229','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'233','val_233','1'
+'233','val_233','1'
+'235','val_235','1'
+'237','val_237','1'
+'237','val_237','1'
+'238','val_238','1'
+'238','val_238','1'
+'239','val_239','1'
+'239','val_239','1'
+'241','val_241','1'
+'242','val_242','1'
+'242','val_242','1'
+'244','val_244','1'
+'247','val_247','1'
+'248','val_248','1'
+'249','val_249','1'
+'252','val_252','1'
+'255','val_255','1'
+'255','val_255','1'
+'256','val_256','1'
+'256','val_256','1'
+'257','val_257','1'
+'258','val_258','1'
+'260','val_260','1'
+'262','val_262','1'
+'263','val_263','1'
+'265','val_265','1'
+'265','val_265','1'
+'266','val_266','1'
+'272','val_272','1'
+'272','val_272','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'274','val_274','1'
+'275','val_275','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'278','val_278','1'
+'278','val_278','1'
+'280','val_280','1'
+'280','val_280','1'
+'281','val_281','1'
+'281','val_281','1'
+'282','val_282','1'
+'282','val_282','1'
+'283','val_283','1'
+'284','val_284','1'
+'285','val_285','1'
+'286','val_286','1'
+'287','val_287','1'
+'288','val_288','1'
+'288','val_288','1'
+'289','val_289','1'
+'291','val_291','1'
+'292','val_292','1'
+'296','val_296','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'302','val_302','1'
+'305','val_305','1'
+'306','val_306','1'
+'307','val_307','1'
+'307','val_307','1'
+'308','val_308','1'
+'309','val_309','1'
+'309','val_309','1'
+'310','val_310','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'315','val_315','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'317','val_317','1'
+'317','val_317','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'321','val_321','1'
+'321','val_321','1'
+'322','val_322','1'
+'322','val_322','1'
+'323','val_323','1'
+'325','val_325','1'
+'325','val_325','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'331','val_331','1'
+'331','val_331','1'
+'332','val_332','1'
+'333','val_333','1'
+'333','val_333','1'
+'335','val_335','1'
+'336','val_336','1'
+'338','val_338','1'
+'339','val_339','1'
+'341','val_341','1'
+'342','val_342','1'
+'342','val_342','1'
+'344','val_344','1'
+'344','val_344','1'
+'345','val_345','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'351','val_351','1'
+'353','val_353','1'
+'353','val_353','1'
+'356','val_356','1'
+'360','val_360','1'
+'362','val_362','1'
+'364','val_364','1'
+'365','val_365','1'
+'366','val_366','1'
+'367','val_367','1'
+'367','val_367','1'
+'368','val_368','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'373','val_373','1'
+'374','val_374','1'
+'375','val_375','1'
+'377','val_377','1'
+'378','val_378','1'
+'379','val_379','1'
+'382','val_382','1'
+'382','val_382','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'386','val_386','1'
+'389','val_389','1'
+'392','val_392','1'
+'393','val_393','1'
+'394','val_394','1'
+'395','val_395','1'
+'395','val_395','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'397','val_397','1'
+'397','val_397','1'
+'399','val_399','1'
+'399','val_399','1'
+'400','val_400','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'402','val_402','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'404','val_404','1'
+'404','val_404','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'407','val_407','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'411','val_411','1'
+'413','val_413','1'
+'413','val_413','1'
+'414','val_414','1'
+'414','val_414','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'418','val_418','1'
+'419','val_419','1'
+'421','val_421','1'
+'424','val_424','1'
+'424','val_424','1'
+'427','val_427','1'
+'429','val_429','1'
+'429','val_429','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'432','val_432','1'
+'435','val_435','1'
+'436','val_436','1'
+'437','val_437','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'439','val_439','1'
+'439','val_439','1'
+'443','val_443','1'
+'444','val_444','1'
+'446','val_446','1'
+'448','val_448','1'
+'449','val_449','1'
+'452','val_452','1'
+'453','val_453','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'455','val_455','1'
+'457','val_457','1'
+'458','val_458','1'
+'458','val_458','1'
+'459','val_459','1'
+'459','val_459','1'
+'460','val_460','1'
+'462','val_462','1'
+'462','val_462','1'
+'463','val_463','1'
+'463','val_463','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'467','val_467','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'470','val_470','1'
+'472','val_472','1'
+'475','val_475','1'
+'477','val_477','1'
+'478','val_478','1'
+'478','val_478','1'
+'479','val_479','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'481','val_481','1'
+'482','val_482','1'
+'483','val_483','1'
+'484','val_484','1'
+'485','val_485','1'
+'487','val_487','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'490','val_490','1'
+'491','val_491','1'
+'492','val_492','1'
+'492','val_492','1'
+'493','val_493','1'
+'494','val_494','1'
+'495','val_495','1'
+'496','val_496','1'
+'497','val_497','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+500 rows selected 
+>>>  SELECT * FROM test_table3 ORDER BY key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT * FROM test_table3 ORDER BY key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:test_table3.key, type:int, comment:null), FieldSchema(name:test_table3.value, type:string, comment:null), FieldSchema(name:test_table3.ds, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT * FROM test_table3 ORDER BY key
+INFO  : PREHOOK: query: SELECT * FROM test_table3 ORDER BY key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT * FROM test_table3 ORDER BY key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT * FROM test_table3 ORDER BY key
+'test_table3.key','test_table3.value','test_table3.ds'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'0','val_0','1'
+'2','val_2','1'
+'4','val_4','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'5','val_5','1'
+'8','val_8','1'
+'9','val_9','1'
+'10','val_10','1'
+'11','val_11','1'
+'12','val_12','1'
+'12','val_12','1'
+'12','val_12','1'
+'12','val_12','1'
+'15','val_15','1'
+'15','val_15','1'
+'15','val_15','1'
+'15','val_15','1'
+'17','val_17','1'
+'18','val_18','1'
+'18','val_18','1'
+'18','val_18','1'
+'18','val_18','1'
+'19','val_19','1'
+'20','val_20','1'
+'24','val_24','1'
+'24','val_24','1'
+'24','val_24','1'
+'24','val_24','1'
+'26','val_26','1'
+'26','val_26','1'
+'26','val_26','1'
+'26','val_26','1'
+'27','val_27','1'
+'28','val_28','1'
+'30','val_30','1'
+'33','val_33','1'
+'34','val_34','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'35','val_35','1'
+'37','val_37','1'
+'37','val_37','1'
+'37','val_37','1'
+'37','val_37','1'
+'41','val_41','1'
+'42','val_42','1'
+'42','val_42','1'
+'42','val_42','1'
+'42','val_42','1'
+'43','val_43','1'
+'44','val_44','1'
+'47','val_47','1'
+'51','val_51','1'
+'51','val_51','1'
+'51','val_51','1'
+'51','val_51','1'
+'53','val_53','1'
+'54','val_54','1'
+'57','val_57','1'
+'58','val_58','1'
+'58','val_58','1'
+'58','val_58','1'
+'58','val_58','1'
+'64','val_64','1'
+'65','val_65','1'
+'66','val_66','1'
+'67','val_67','1'
+'67','val_67','1'
+'67','val_67','1'
+'67','val_67','1'
+'69','val_69','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'70','val_70','1'
+'72','val_72','1'
+'72','val_72','1'
+'72','val_72','1'
+'72','val_72','1'
+'74','val_74','1'
+'76','val_76','1'
+'76','val_76','1'
+'76','val_76','1'
+'76','val_76','1'
+'77','val_77','1'
+'78','val_78','1'
+'80','val_80','1'
+'82','val_82','1'
+'83','val_83','1'
+'83','val_83','1'
+'83','val_83','1'
+'83','val_83','1'
+'84','val_84','1'
+'84','val_84','1'
+'84','val_84','1'
+'84','val_84','1'
+'85','val_85','1'
+'86','val_86','1'
+'87','val_87','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'90','val_90','1'
+'92','val_92','1'
+'95','val_95','1'
+'95','val_95','1'
+'95','val_95','1'
+'95','val_95','1'
+'96','val_96','1'
+'97','val_97','1'
+'97','val_97','1'
+'97','val_97','1'
+'97','val_97','1'
+'98','val_98','1'
+'98','val_98','1'
+'98','val_98','1'
+'98','val_98','1'
+'100','val_100','1'
+'100','val_100','1'
+'100','val_100','1'
+'100','val_100','1'
+'103','val_103','1'
+'103','val_103','1'
+'103','val_103','1'
+'103','val_103','1'
+'104','val_104','1'
+'104','val_104','1'
+'104','val_104','1'
+'104','val_104','1'
+'105','val_105','1'
+'111','val_111','1'
+'113','val_113','1'
+'113','val_113','1'
+'113','val_113','1'
+'113','val_113','1'
+'114','val_114','1'
+'116','val_116','1'
+'118','val_118','1'
+'118','val_118','1'
+'118','val_118','1'
+'118','val_118','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'119','val_119','1'
+'120','val_120','1'
+'120','val_120','1'
+'120','val_120','1'
+'120','val_120','1'
+'125','val_125','1'
+'125','val_125','1'
+'125','val_125','1'
+'125','val_125','1'
+'126','val_126','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'128','val_128','1'
+'129','val_129','1'
+'129','val_129','1'
+'129','val_129','1'
+'129','val_129','1'
+'131','val_131','1'
+'133','val_133','1'
+'134','val_134','1'
+'134','val_134','1'
+'134','val_134','1'
+'134','val_134','1'
+'136','val_136','1'
+'137','val_137','1'
+'137','val_137','1'
+'137','val_137','1'
+'137','val_137','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'138','val_138','1'
+'143','val_143','1'
+'145','val_145','1'
+'146','val_146','1'
+'146','val_146','1'
+'146','val_146','1'
+'146','val_146','1'
+'149','val_149','1'
+'149','val_149','1'
+'149','val_149','1'
+'149','val_149','1'
+'150','val_150','1'
+'152','val_152','1'
+'152','val_152','1'
+'152','val_152','1'
+'152','val_152','1'
+'153','val_153','1'
+'155','val_155','1'
+'156','val_156','1'
+'157','val_157','1'
+'158','val_158','1'
+'160','val_160','1'
+'162','val_162','1'
+'163','val_163','1'
+'164','val_164','1'
+'164','val_164','1'
+'164','val_164','1'
+'164','val_164','1'
+'165','val_165','1'
+'165','val_165','1'
+'165','val_165','1'
+'165','val_165','1'
+'166','val_166','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'167','val_167','1'
+'168','val_168','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'169','val_169','1'
+'170','val_170','1'
+'172','val_172','1'
+'172','val_172','1'
+'172','val_172','1'
+'172','val_172','1'
+'174','val_174','1'
+'174','val_174','1'
+'174','val_174','1'
+'174','val_174','1'
+'175','val_175','1'
+'175','val_175','1'
+'175','val_175','1'
+'175','val_175','1'
+'176','val_176','1'
+'176','val_176','1'
+'176','val_176','1'
+'176','val_176','1'
+'177','val_177','1'
+'178','val_178','1'
+'179','val_179','1'
+'179','val_179','1'
+'179','val_179','1'
+'179','val_179','1'
+'180','val_180','1'
+'181','val_181','1'
+'183','val_183','1'
+'186','val_186','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'187','val_187','1'
+'189','val_189','1'
+'190','val_190','1'
+'191','val_191','1'
+'191','val_191','1'
+'191','val_191','1'
+'191','val_191','1'
+'192','val_192','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'194','val_194','1'
+'195','val_195','1'
+'195','val_195','1'
+'195','val_195','1'
+'195','val_195','1'
+'196','val_196','1'
+'197','val_197','1'
+'197','val_197','1'
+'197','val_197','1'
+'197','val_197','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'199','val_199','1'
+'200','val_200','1'
+'200','val_200','1'
+'200','val_200','1'
+'200','val_200','1'
+'201','val_201','1'
+'202','val_202','1'
+'203','val_203','1'
+'203','val_203','1'
+'203','val_203','1'
+'203','val_203','1'
+'205','val_205','1'
+'205','val_205','1'
+'205','val_205','1'
+'205','val_205','1'
+'207','val_207','1'
+'207','val_207','1'
+'207','val_207','1'
+'207','val_207','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'208','val_208','1'
+'209','val_209','1'
+'209','val_209','1'
+'209','val_209','1'
+'209','val_209','1'
+'213','val_213','1'
+'213','val_213','1'
+'213','val_213','1'
+'213','val_213','1'
+'214','val_214','1'
+'216','val_216','1'
+'216','val_216','1'
+'216','val_216','1'
+'216','val_216','1'
+'217','val_217','1'
+'217','val_217','1'
+'217','val_217','1'
+'217','val_217','1'
+'218','val_218','1'
+'219','val_219','1'
+'219','val_219','1'
+'219','val_219','1'
+'219','val_219','1'
+'221','val_221','1'
+'221','val_221','1'
+'221','val_221','1'
+'221','val_221','1'
+'222','val_222','1'
+'223','val_223','1'
+'223','val_223','1'
+'223','val_223','1'
+'223','val_223','1'
+'224','val_224','1'
+'224','val_224','1'
+'224','val_224','1'
+'224','val_224','1'
+'226','val_226','1'
+'228','val_228','1'
+'229','val_229','1'
+'229','val_229','1'
+'229','val_229','1'
+'229','val_229','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'230','val_230','1'
+'233','val_233','1'
+'233','val_233','1'
+'233','val_233','1'
+'233','val_233','1'
+'235','val_235','1'
+'237','val_237','1'
+'237','val_237','1'
+'237','val_237','1'
+'237','val_237','1'
+'238','val_238','1'
+'238','val_238','1'
+'238','val_238','1'
+'238','val_238','1'
+'239','val_239','1'
+'239','val_239','1'
+'239','val_239','1'
+'239','val_239','1'
+'241','val_241','1'
+'242','val_242','1'
+'242','val_242','1'
+'242','val_242','1'
+'242','val_242','1'
+'244','val_244','1'
+'247','val_247','1'
+'248','val_248','1'
+'249','val_249','1'
+'252','val_252','1'
+'255','val_255','1'
+'255','val_255','1'
+'255','val_255','1'
+'255','val_255','1'
+'256','val_256','1'
+'256','val_256','1'
+'256','val_256','1'
+'256','val_256','1'
+'257','val_257','1'
+'258','val_258','1'
+'260','val_260','1'
+'262','val_262','1'
+'263','val_263','1'
+'265','val_265','1'
+'265','val_265','1'
+'265','val_265','1'
+'265','val_265','1'
+'266','val_266','1'
+'272','val_272','1'
+'272','val_272','1'
+'272','val_272','1'
+'272','val_272','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'274','val_274','1'
+'275','val_275','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'277','val_277','1'
+'278','val_278','1'
+'278','val_278','1'
+'278','val_278','1'
+'278','val_278','1'
+'280','val_280','1'
+'280','val_280','1'
+'280','val_280','1'
+'280','val_280','1'
+'281','val_281','1'
+'281','val_281','1'
+'281','val_281','1'
+'281','val_281','1'
+'282','val_282','1'
+'282','val_282','1'
+'282','val_282','1'
+'282','val_282','1'
+'283','val_283','1'
+'284','val_284','1'
+'285','val_285','1'
+'286','val_286','1'
+'287','val_287','1'
+'288','val_288','1'
+'288','val_288','1'
+'288','val_288','1'
+'288','val_288','1'
+'289','val_289','1'
+'291','val_291','1'
+'292','val_292','1'
+'296','val_296','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'298','val_298','1'
+'302','val_302','1'
+'305','val_305','1'
+'306','val_306','1'
+'307','val_307','1'
+'307','val_307','1'
+'307','val_307','1'
+'307','val_307','1'
+'308','val_308','1'
+'309','val_309','1'
+'309','val_309','1'
+'309','val_309','1'
+'309','val_309','1'
+'310','val_310','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'311','val_311','1'
+'315','val_315','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'316','val_316','1'
+'317','val_317','1'
+'317','val_317','1'
+'317','val_317','1'
+'317','val_317','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'318','val_318','1'
+'321','val_321','1'
+'321','val_321','1'
+'321','val_321','1'
+'321','val_321','1'
+'322','val_322','1'
+'322','val_322','1'
+'322','val_322','1'
+'322','val_322','1'
+'323','val_323','1'
+'325','val_325','1'
+'325','val_325','1'
+'325','val_325','1'
+'325','val_325','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'327','val_327','1'
+'331','val_331','1'
+'331','val_331','1'
+'331','val_331','1'
+'331','val_331','1'
+'332','val_332','1'
+'333','val_333','1'
+'333','val_333','1'
+'333','val_333','1'
+'333','val_333','1'
+'335','val_335','1'
+'336','val_336','1'
+'338','val_338','1'
+'339','val_339','1'
+'341','val_341','1'
+'342','val_342','1'
+'342','val_342','1'
+'342','val_342','1'
+'342','val_342','1'
+'344','val_344','1'
+'344','val_344','1'
+'344','val_344','1'
+'344','val_344','1'
+'345','val_345','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'348','val_348','1'
+'351','val_351','1'
+'353','val_353','1'
+'353','val_353','1'
+'353','val_353','1'
+'353','val_353','1'
+'356','val_356','1'
+'360','val_360','1'
+'362','val_362','1'
+'364','val_364','1'
+'365','val_365','1'
+'366','val_366','1'
+'367','val_367','1'
+'367','val_367','1'
+'367','val_367','1'
+'367','val_367','1'
+'368','val_368','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'373','val_373','1'
+'374','val_374','1'
+'375','val_375','1'
+'377','val_377','1'
+'378','val_378','1'
+'379','val_379','1'
+'382','val_382','1'
+'382','val_382','1'
+'382','val_382','1'
+'382','val_382','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'384','val_384','1'
+'386','val_386','1'
+'389','val_389','1'
+'392','val_392','1'
+'393','val_393','1'
+'394','val_394','1'
+'395','val_395','1'
+'395','val_395','1'
+'395','val_395','1'
+'395','val_395','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'396','val_396','1'
+'397','val_397','1'
+'397','val_397','1'
+'397','val_397','1'
+'397','val_397','1'
+'399','val_399','1'
+'399','val_399','1'
+'399','val_399','1'
+'399','val_399','1'
+'400','val_400','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'402','val_402','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'403','val_403','1'
+'404','val_404','1'
+'404','val_404','1'
+'404','val_404','1'
+'404','val_404','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'406','val_406','1'
+'407','val_407','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'409','val_409','1'
+'411','val_411','1'
+'413','val_413','1'
+'413','val_413','1'
+'413','val_413','1'
+'413','val_413','1'
+'414','val_414','1'
+'414','val_414','1'
+'414','val_414','1'
+'414','val_414','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'418','val_418','1'
+'419','val_419','1'
+'421','val_421','1'
+'424','val_424','1'
+'424','val_424','1'
+'424','val_424','1'
+'424','val_424','1'
+'427','val_427','1'
+'429','val_429','1'
+'429','val_429','1'
+'429','val_429','1'
+'429','val_429','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'430','val_430','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'431','val_431','1'
+'432','val_432','1'
+'435','val_435','1'
+'436','val_436','1'
+'437','val_437','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'438','val_438','1'
+'439','val_439','1'
+'439','val_439','1'
+'439','val_439','1'
+'439','val_439','1'
+'443','val_443','1'
+'444','val_444','1'
+'446','val_446','1'
+'448','val_448','1'
+'449','val_449','1'
+'452','val_452','1'
+'453','val_453','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'454','val_454','1'
+'455','val_455','1'
+'457','val_457','1'
+'458','val_458','1'
+'458','val_458','1'
+'458','val_458','1'
+'458','val_458','1'
+'459','val_459','1'
+'459','val_459','1'
+'459','val_459','1'
+'459','val_459','1'
+'460','val_460','1'
+'462','val_462','1'
+'462','val_462','1'
+'462','val_462','1'
+'462','val_462','1'
+'463','val_463','1'
+'463','val_463','1'
+'463','val_463','1'
+'463','val_463','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'466','val_466','1'
+'467','val_467','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'468','val_468','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'469','val_469','1'
+'470','val_470','1'
+'472','val_472','1'
+'475','val_475','1'
+'477','val_477','1'
+'478','val_478','1'
+'478','val_478','1'
+'478','val_478','1'
+'478','val_478','1'
+'479','val_479','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'480','val_480','1'
+'481','val_481','1'
+'482','val_482','1'
+'483','val_483','1'
+'484','val_484','1'
+'485','val_485','1'
+'487','val_487','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'489','val_489','1'
+'490','val_490','1'
+'491','val_491','1'
+'492','val_492','1'
+'492','val_492','1'
+'492','val_492','1'
+'492','val_492','1'
+'493','val_493','1'
+'494','val_494','1'
+'495','val_495','1'
+'496','val_496','1'
+'497','val_497','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+'498','val_498','1'
+1,028 rows selected 
+>>>  EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-2:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: test_table1'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: true'
+'              predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean)'
+'              sampleDesc: BUCKET 2 OUT OF 16'
+'              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE'
+'              Select Operator'
+'                expressions: key (type: int), value (type: string), ds (type: string)'
+'                outputColumnNames: _col0, _col1, _col2'
+'                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE'
+'                File Output Operator'
+'                  compressed: false'
+'                  GlobalTableId: 0'
+'                  directory: file:/!!ELIDED!!
+'                  NumFilesPerFileSink: 1'
+'                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE'
+'                  Stats Publishing Key Prefix: file:/!!ELIDED!!
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      properties:'
+'                        columns _col0,_col1,_col2'
+'                        columns.types int:string:string'
+'                        escape.delim \'
+'                        hive.serialization.extend.additional.nesting.levels true'
+'                        serialization.escape.crlf true'
+'                        serialization.format 1'
+'                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  TotalFiles: 1'
+'                  GatherStats: false'
+'                  MultiFileSpray: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [test_table1]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: 000001_0'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            partition values:'
+'              ds 1'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_11.test_table1'
+'              numFiles 16'
+'              numRows 500'
+'              partition_columns ds'
+'              partition_columns.types string'
+'              rawDataSize 5312'
+'              serialization.ddl struct test_table1 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 5812'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_11.test_table1'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table1 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_11.test_table1'
+'            name: smb_mapjoin_11.test_table1'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_11.db/test_table1/ds=1/000001_0 [test_table1]'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+109 rows selected 
+>>>  EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-2:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: test_table3'
+'            Statistics: Num rows: 1028 Data size: 10968 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: true'
+'              predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean)'
+'              sampleDesc: BUCKET 2 OUT OF 16'
+'              Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE'
+'              Select Operator'
+'                expressions: key (type: int), value (type: string), ds (type: string)'
+'                outputColumnNames: _col0, _col1, _col2'
+'                Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE'
+'                File Output Operator'
+'                  compressed: false'
+'                  GlobalTableId: 0'
+'                  directory: file:/!!ELIDED!!
+'                  NumFilesPerFileSink: 1'
+'                  Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE'
+'                  Stats Publishing Key Prefix: file:/!!ELIDED!!
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      properties:'
+'                        columns _col0,_col1,_col2'
+'                        columns.types int:string:string'
+'                        escape.delim \'
+'                        hive.serialization.extend.additional.nesting.levels true'
+'                        serialization.escape.crlf true'
+'                        serialization.format 1'
+'                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  TotalFiles: 1'
+'                  GatherStats: false'
+'                  MultiFileSpray: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [test_table3]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: 000001_0'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            partition values:'
+'              ds 1'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_11.test_table3'
+'              numFiles 16'
+'              numRows 1028'
+'              partition_columns ds'
+'              partition_columns.types string'
+'              rawDataSize 10968'
+'              serialization.ddl struct test_table3 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 11996'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_11.test_table3'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_11.test_table3'
+'            name: smb_mapjoin_11.test_table3'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_11.db/test_table3/ds=1/000001_0 [test_table3]'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+108 rows selected 
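The two plans above both rewrite TABLESAMPLE(BUCKET 2 OUT OF 16) into the filter predicate (((hash(key) & 2147483647) % 16) = 1): bucket 2 of 16 corresponds to bucket index 1. A minimal, illustrative check of that arithmetic follows (class name hypothetical, and it assumes Hive's bucketing hash of an int key is the key value itself, which is consistent with the sampled keys below all being congruent to 1 mod 16):

    import java.util.Arrays;

    public class BucketSampleCheck {
      public static void main(String[] args) {
        // Mirrors the sampling predicate from the EXPLAIN plans:
        //   (((hash(key) & 2147483647) % 16) = 1)  for TABLESAMPLE(BUCKET 2 OUT OF 16)
        int numBuckets = 16;
        int sampledBucket = 2;                       // 1-based in the query, index 1 internally
        int[] keys = {17, 33, 65, 97, 113, 150, 200, 401, 497};
        for (int key : keys) {
          int bucket = (key & Integer.MAX_VALUE) % numBuckets;  // 2147483647 = Integer.MAX_VALUE
          boolean sampled = (bucket == sampledBucket - 1);
          System.out.println("key=" + key + " -> bucket " + bucket
              + (sampled ? " (sampled)" : " (skipped)"));
        }
        System.out.println(Arrays.toString(keys));
      }
    }

Keys such as 17, 33, 65, 97, 113, 401 and 497 land in bucket index 1 and are sampled, while 150 and 200 are skipped, matching the rows returned by the sampled scans below.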
+>>>  SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:test_table1.key, type:int, comment:null), FieldSchema(name:test_table1.value, type:string, comment:null), FieldSchema(name:test_table1.ds, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16)
+'test_table1.key','test_table1.value','test_table1.ds'
+'17','val_17','1'
+'33','val_33','1'
+'65','val_65','1'
+'97','val_97','1'
+'97','val_97','1'
+'113','val_113','1'
+'113','val_113','1'
+'129','val_129','1'
+'129','val_129','1'
+'145','val_145','1'
+'177','val_177','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'209','val_209','1'
+'209','val_209','1'
+'241','val_241','1'
+'257','val_257','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'289','val_289','1'
+'305','val_305','1'
+'321','val_321','1'
+'321','val_321','1'
+'353','val_353','1'
+'353','val_353','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'449','val_449','1'
+'481','val_481','1'
+'497','val_497','1'
+41 rows selected 
+>>>  SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:test_table3.key, type:int, comment:null), FieldSchema(name:test_table3.value, type:string, comment:null), FieldSchema(name:test_table3.ds, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16)
+'test_table3.key','test_table3.value','test_table3.ds'
+'497','val_497','1'
+'481','val_481','1'
+'449','val_449','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'417','val_417','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'401','val_401','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'369','val_369','1'
+'353','val_353','1'
+'353','val_353','1'
+'353','val_353','1'
+'353','val_353','1'
+'321','val_321','1'
+'321','val_321','1'
+'321','val_321','1'
+'321','val_321','1'
+'305','val_305','1'
+'289','val_289','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'273','val_273','1'
+'257','val_257','1'
+'241','val_241','1'
+'209','val_209','1'
+'209','val_209','1'
+'209','val_209','1'
+'209','val_209','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'193','val_193','1'
+'177','val_177','1'
+'145','val_145','1'
+'129','val_129','1'
+'129','val_129','1'
+'129','val_129','1'
+'129','val_129','1'
+'113','val_113','1'
+'113','val_113','1'
+'113','val_113','1'
+'113','val_113','1'
+'97','val_97','1'
+'97','val_97','1'
+'97','val_97','1'
+'97','val_97','1'
+'65','val_65','1'
+'33','val_33','1'
+'17','val_17','1'
+97 rows selected 
+>>>  
+>>>  -- Join data from a sampled bucket to verify the data is bucketed
+>>>  SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c0, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 2
+INFO  : Launching Job 1 out of 2
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks not specified. Estimated from input data size: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 2 out of 2
+INFO  : Starting task [Stage-2:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table1@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_11@test_table3@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-2:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+'_c0'
+'293'
+1 row selected 
+>>>  
+>>>  !record


[7/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a2ce7f3d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a2ce7f3d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a2ce7f3d

Branch: refs/heads/master
Commit: a2ce7f3d2edf404858b84f3d729a85a3660969b3
Parents: 392b6e3
Author: Yongzhi Chen <yc...@apache.org>
Authored: Mon Apr 10 10:38:52 2017 -0400
Committer: Yongzhi Chen <yc...@apache.org>
Committed: Mon Apr 10 10:38:52 2017 -0400

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/BeeLine.java   |   52 +
 .../java/org/apache/hive/beeline/Commands.java  |   55 +-
 .../test/resources/testconfiguration.properties |   12 +-
 .../hive/cli/control/CoreBeeLineDriver.java     |   17 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |    4 +-
 .../org/apache/hive/beeline/qfile/QFile.java    |  124 +-
 .../hive/beeline/qfile/QFileBeeLineClient.java  |   14 +-
 .../apache/hive/beeline/qfile/package-info.java |    4 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    2 +-
 .../beeline/drop_with_concurrency.q.out         |   47 +-
 .../beeline/escape_comments.q.out               |  267 +-
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |  948 +++++++
 .../clientpositive/beeline/smb_mapjoin_10.q.out |  248 ++
 .../clientpositive/beeline/smb_mapjoin_11.q.out | 2563 ++++++++++++++++++
 .../clientpositive/beeline/smb_mapjoin_12.q.out |  822 ++++++
 .../clientpositive/beeline/smb_mapjoin_13.q.out |  687 +++++
 .../clientpositive/beeline/smb_mapjoin_16.q.out |  254 ++
 .../clientpositive/beeline/smb_mapjoin_2.q.out  |  955 +++++++
 .../clientpositive/beeline/smb_mapjoin_3.q.out  |  950 +++++++
 .../clientpositive/beeline/smb_mapjoin_7.q.out  | 1805 ++++++++++++
 20 files changed, 9549 insertions(+), 281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 11526a7..27b353c 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -22,6 +22,7 @@
  */
 package org.apache.hive.beeline;
 
+import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.Closeable;
 import java.io.EOFException;
@@ -29,6 +30,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.io.SequenceInputStream;
 import java.lang.reflect.InvocationTargetException;
@@ -59,6 +61,7 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
@@ -1380,6 +1383,55 @@ public class BeeLine implements Closeable {
     return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
   }
 
+  String[] getCommands(File file) throws IOException {
+    List<String> cmds = new LinkedList<String>();
+    try (BufferedReader reader =
+             new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF-8"))) {
+      StringBuilder cmd = null;
+      while (true) {
+        String scriptLine = reader.readLine();
+
+        if (scriptLine == null) {
+          break;
+        }
+
+        String trimmedLine = scriptLine.trim();
+        if (getOpts().getTrimScripts()) {
+          scriptLine = trimmedLine;
+        }
+
+        if (cmd != null) {
+          // we're continuing an existing command
+          cmd.append("\n");
+          cmd.append(scriptLine);
+          if (trimmedLine.endsWith(";")) {
+            // this command has terminated
+            cmds.add(cmd.toString());
+            cmd = null;
+          }
+        } else {
+          // we're starting a new command
+          if (needsContinuation(scriptLine)) {
+            // multi-line
+            cmd = new StringBuilder(scriptLine);
+          } else {
+            // single-line
+            cmds.add(scriptLine);
+          }
+        }
+      }
+
+      if (cmd != null) {
+        // ### REVIEW: oops, somebody left the last command
+        // unterminated; should we fix it for them or complain?
+        // For now be nice and fix it.
+        cmd.append(";");
+        cmds.add(cmd.toString());
+      }
+    }
+    return cmds.toArray(new String[0]);
+  }
+
   /**
    * Print the specified message to the console
    *
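
The new BeeLine#getCommands above reads a script file and groups its lines into complete commands: a started command is continued until a trimmed line ends with ";", and a dangling final command gets a ";" appended. Below is a minimal, self-contained sketch of that splitting rule only; needsContinuation() and the trimScripts option are simplified away, so this is an approximation for illustration, not the BeeLine API itself.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CommandSplitSketch {
  // Group script lines into commands, closing a command when a trimmed line ends with ';'.
  static List<String> split(List<String> scriptLines) {
    List<String> cmds = new ArrayList<>();
    StringBuilder cmd = null;
    for (String line : scriptLines) {
      String trimmed = line.trim();
      if (cmd != null) {
        // continuing a multi-line command
        cmd.append("\n").append(line);
        if (trimmed.endsWith(";")) {
          cmds.add(cmd.toString());
          cmd = null;
        }
      } else if (trimmed.endsWith(";")) {
        // single-line command
        cmds.add(line);
      } else {
        // start of a multi-line command (BeeLine decides this via needsContinuation())
        cmd = new StringBuilder(line);
      }
    }
    if (cmd != null) {
      cmds.add(cmd.append(";").toString()); // terminate an unterminated trailing command
    }
    return cmds;
  }

  public static void main(String[] args) {
    List<String> cmds = split(Arrays.asList(
        "set hive.mapred.mode=nonstrict;",
        "create table t",
        "  (c1 int);"));
    System.out.println(cmds.size()); // 2: the second command spans two lines
  }
}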

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 2578728..d179b37 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -55,7 +55,6 @@ import java.util.TreeSet;
 
 import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.SystemVariables;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
@@ -1788,60 +1787,10 @@ public class Commands {
       return false;
     }
 
-    List<String> cmds = new LinkedList<String>();
-
     try {
-      BufferedReader reader = new BufferedReader(new FileReader(
-          parts[1]));
-      try {
-        // ### NOTE: fix for sf.net bug 879427
-        StringBuilder cmd = null;
-        for (;;) {
-          String scriptLine = reader.readLine();
-
-          if (scriptLine == null) {
-            break;
-          }
-
-          String trimmedLine = scriptLine.trim();
-          if (beeLine.getOpts().getTrimScripts()) {
-            scriptLine = trimmedLine;
-          }
-
-          if (cmd != null) {
-            // we're continuing an existing command
-            cmd.append(" \n");
-            cmd.append(scriptLine);
-            if (trimmedLine.endsWith(";")) {
-              // this command has terminated
-              cmds.add(cmd.toString());
-              cmd = null;
-            }
-          } else {
-            // we're starting a new command
-            if (beeLine.needsContinuation(scriptLine)) {
-              // multi-line
-              cmd = new StringBuilder(scriptLine);
-            } else {
-              // single-line
-              cmds.add(scriptLine);
-            }
-          }
-        }
-
-        if (cmd != null) {
-          // ### REVIEW: oops, somebody left the last command
-          // unterminated; should we fix it for them or complain?
-          // For now be nice and fix it.
-          cmd.append(";");
-          cmds.add(cmd.toString());
-        }
-      } finally {
-        reader.close();
-      }
-
+      String[] cmds = beeLine.getCommands(new File(parts[1]));
       // success only if all the commands were successful
-      return beeLine.runCommands(cmds) == cmds.size();
+      return beeLine.runCommands(cmds) == cmds.length;
     } catch (Exception e) {
       return beeLine.error(e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 7a70c9c..fb85b9e 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -745,7 +745,17 @@ encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_auto_purge_tables.q
 
 beeline.positive.include=drop_with_concurrency.q,\
-  escape_comments.q
+  escape_comments.q,\
+  smb_mapjoin_1.q,\
+  smb_mapjoin_10.q,\
+  smb_mapjoin_11.q,\
+  smb_mapjoin_12.q,\
+  smb_mapjoin_13.q,\
+  smb_mapjoin_16.q,\
+  smb_mapjoin_2.q,\
+  smb_mapjoin_3.q,\
+  smb_mapjoin_7.q
+
 
 minimr.query.negative.files=cluster_tasklog_retrieval.q,\
   file_with_header_footer_negative.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
index 0d63f5d..8c7057c 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
@@ -23,10 +23,10 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.hooks.PreExecutePrinter;
-import org.apache.hive.beeline.qfile.QFile;
-import org.apache.hive.beeline.qfile.QFile.QFileBuilder;
-import org.apache.hive.beeline.qfile.QFileBeeLineClient;
-import org.apache.hive.beeline.qfile.QFileBeeLineClient.QFileClientBuilder;
+import org.apache.hive.beeline.QFile;
+import org.apache.hive.beeline.QFile.QFileBuilder;
+import org.apache.hive.beeline.QFileBeeLineClient;
+import org.apache.hive.beeline.QFileBeeLineClient.QFileClientBuilder;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -46,6 +46,7 @@ public class CoreBeeLineDriver extends CliAdapter {
   private final File testDataDirectory;
   private final File testScriptDirectory;
   private boolean overwrite = false;
+  private boolean rewriteSourceTables = true;
   private MiniHS2 miniHS2;
   private QFileClientBuilder clientBuilder;
   private QFileBuilder fileBuilder;
@@ -70,6 +71,10 @@ public class CoreBeeLineDriver extends CliAdapter {
     if (testOutputOverwrite != null && "true".equalsIgnoreCase(testOutputOverwrite)) {
       overwrite = true;
     }
+    String testRewriteSourceTables = System.getProperty("test.rewrite.source.tables");
+    if (testRewriteSourceTables != null && "false".equalsIgnoreCase(testRewriteSourceTables)) {
+      rewriteSourceTables = false;
+    }
 
     HiveConf hiveConf = new HiveConf();
     // We do not need Zookeeper at the moment
@@ -94,12 +99,10 @@ public class CoreBeeLineDriver extends CliAdapter {
         .setPassword("password");
 
     fileBuilder = new QFileBuilder()
-        .setHiveRootDirectory(hiveRootDirectory)
         .setLogDirectory(logDirectory)
         .setQueryDirectory(queryDirectory)
         .setResultsDirectory(resultsDirectory)
-        .setScratchDirectoryString(hiveConf.getVar(HiveConf.ConfVars.SCRATCHDIR))
-        .setWarehouseDirectoryString(hiveConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE));
+        .setRewriteSourceTables(rewriteSourceTables);
 
     runInfraScript(initScript, new File(logDirectory, "init.beeline"),
         new File(logDirectory, "init.raw"));

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index c820dc7..2ae1eac 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -170,7 +170,7 @@ public class QTestUtil {
   private final Set<String> qNoSessionReuseQuerySet;
   private final Set<String> qJavaVersionSpecificOutput;
   private static final String SORT_SUFFIX = ".sorted";
-  private final HashSet<String> srcTables;
+  private final Set<String> srcTables;
   private final Set<String> srcUDFs;
   private final MiniClusterType clusterType;
   private final FsType fsType;
@@ -203,7 +203,7 @@ public class QTestUtil {
   }
   private HBaseTestingUtility utility;
 
-  HashSet<String> getSrcTables() {
+  public static Set<String> getSrcTables() {
     HashSet<String> srcTables = new HashSet<String>();
     // FIXME: moved default value to here...for now
     // i think this features is never really used from the command line

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
index ae5a349..9fae194 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFile.java
@@ -16,10 +16,11 @@
  * limitations under the License.
  */
 
-package org.apache.hive.beeline.qfile;
+package org.apache.hive.beeline;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
+import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.StreamPrinter;
 
@@ -29,9 +30,8 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
+import java.util.Set;
 import java.util.regex.Pattern;
 
 /**
@@ -39,16 +39,25 @@ import java.util.regex.Pattern;
  * input and output files, and provides methods for filtering the output of the runs.
  */
 public final class QFile {
+  private static final Set<String> srcTables = QTestUtil.getSrcTables();
   private static final String DEBUG_HINT =
-      "The following files can help you identifying the problem:\n"
-      + " - Query file: %1\n"
-      + " - Raw output file: %2\n"
-      + " - Filtered output file: %3\n"
-      + " - Expected output file: %4\n"
-      + " - Client log file: %5\n"
-      + " - Client log files before the test: %6\n"
-      + " - Client log files after the test: %7\n"
-      + " - Hiveserver2 log file: %8\n";
+      "The following files can help you identifying the problem:%n"
+      + " - Query file: %1s%n"
+      + " - Raw output file: %2s%n"
+      + " - Filtered output file: %3s%n"
+      + " - Expected output file: %4s%n"
+      + " - Client log file: %5s%n"
+      + " - Client log files before the test: %6s%n"
+      + " - Client log files after the test: %7s%n"
+      + " - Hiveserver2 log file: %8s%n";
+  private static final String USE_COMMAND_WARNING =
+      "The query file %1s contains \"%2s\" command.%n"
+      + "The source table name rewrite is turned on, so this might cause problems when the used "
+      + "database contains tables named any of the following: " + srcTables + "%n"
+      + "To turn off the table name rewrite use -Dtest.rewrite.source.tables=false%n";
+
+  private static final Pattern USE_PATTERN =
+      Pattern.compile("^\\s*use\\s.*", Pattern.CASE_INSENSITIVE);
 
   private String name;
   private File inputFile;
@@ -58,8 +67,8 @@ public final class QFile {
   private File logFile;
   private File beforeExecuteLogFile;
   private File afterExecuteLogFile;
-  private static RegexFilterSet staticFilterSet = getStaticFilterSet();
-  private RegexFilterSet specificFilterSet;
+  private static RegexFilterSet filterSet = getFilterSet();
+  private boolean rewriteSourceTables;
 
   private QFile() {}
 
@@ -97,12 +106,49 @@ public final class QFile {
 
   public String getDebugHint() {
     return String.format(DEBUG_HINT, inputFile, rawOutputFile, outputFile, expectedOutputFile,
-        logFile, beforeExecuteLogFile, afterExecuteLogFile, "./itests/qtest/target/tmp/hive.log");
+        logFile, beforeExecuteLogFile, afterExecuteLogFile,
+        "./itests/qtest/target/tmp/log/hive.log");
+  }
+
+  /**
+   * Filters the sql commands if necessary.
+   * @param commands The array of the sql commands before filtering
+   * @return The filtered array of the sql command strings
+   * @throws IOException File read error
+   */
+  public String[] filterCommands(String[] commands) throws IOException {
+    if (rewriteSourceTables) {
+      for (int i=0; i<commands.length; i++) {
+        if (USE_PATTERN.matcher(commands[i]).matches()) {
+          System.err.println(String.format(USE_COMMAND_WARNING, inputFile, commands[i]));
+        }
+        commands[i] = replaceTableNames(commands[i]);
+      }
+    }
+    return commands;
+  }
+
+  /**
+   * Replace the default src database TABLE_NAMEs in the queries with default.TABLE_NAME, like
+   * src->default.src, srcpart->default.srcpart, so the queries could be run even if the used
+   * database is query specific. This change is only a best effort, since we do not want to parse
+   * the queries, we could not be sure that we do not replace other strings which are not
+   * tablenames. Like 'select src from othertable;'. The q files containing these commands should
+   * be excluded. Only replace the tablenames, if rewriteSourceTables is set.
+   * @param source The original query string
+   * @return The query string where the tablenames are replaced
+   */
+  private String replaceTableNames(String source) {
+    for (String table : srcTables) {
+      source = source.replaceAll("(?is)(\\s+)" + table + "([\\s;\\n\\)])", "$1default." + table
+          + "$2");
+    }
+    return source;
   }
 
   public void filterOutput() throws IOException {
-    String rawOutput = FileUtils.readFileToString(rawOutputFile);
-    String filteredOutput = staticFilterSet.filter(specificFilterSet.filter(rawOutput));
+    String rawOutput = FileUtils.readFileToString(rawOutputFile, "UTF-8");
+    String filteredOutput = filterSet.filter(rawOutput);
     FileUtils.writeStringToFile(outputFile, filteredOutput);
   }
 
@@ -198,7 +244,7 @@ public final class QFile {
 
   // These are the filters which are common for every QTest.
   // Check specificFilterSet for QTest specific ones.
-  private static RegexFilterSet getStaticFilterSet() {
+  private static RegexFilterSet getFilterSet() {
     // Extract the leading four digits from the unix time value.
     // Use this as a prefix in order to increase the selectivity
     // of the unix time stamp replacement regex.
@@ -209,23 +255,20 @@ public final class QFile {
     String timePattern = "(Mon|Tue|Wed|Thu|Fri|Sat|Sun) "
         + "(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
         + "\\d{2} \\d{2}:\\d{2}:\\d{2} \\w+ 20\\d{2}";
-    // Pattern to remove the timestamp and other infrastructural info from the out file
-    String logPattern = "\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d*\\s+\\S+\\s+\\[" +
-        ".*\\]\\s+\\S+:\\s+";
     String operatorPattern = "\"(CONDITION|COPY|DEPENDENCY_COLLECTION|DDL"
         + "|EXPLAIN|FETCH|FIL|FS|FUNCTION|GBY|HASHTABLEDUMMY|HASTTABLESINK|JOIN"
         + "|LATERALVIEWFORWARD|LIM|LVJ|MAP|MAPJOIN|MAPRED|MAPREDLOCAL|MOVE|OP|RS"
         + "|SCR|SEL|STATS|TS|UDTF|UNION)_\\d+\"";
 
     return new RegexFilterSet()
-        .addFilter(logPattern, "")
         .addFilter("(?s)\n[^\n]*Waiting to acquire compile lock.*?Acquired the compile lock.\n",
             "\n")
-        .addFilter("Acquired the compile lock.\n","")
+        .addFilter(".*Acquired the compile lock.\n", "")
         .addFilter("Getting log thread is interrupted, since query is done!\n", "")
         .addFilter("going to print operations logs\n", "")
         .addFilter("printed operations logs\n", "")
         .addFilter("\\(queryId=[^\\)]*\\)", "queryId=(!!{queryId}!!)")
+        .addFilter("Query ID = [\\w-]+", "Query ID = !!{queryId}!!")
         .addFilter("file:/\\w\\S+", "file:/!!ELIDED!!")
         .addFilter("pfile:/\\w\\S+", "pfile:/!!ELIDED!!")
         .addFilter("hdfs:/\\w\\S+", "hdfs:/!!ELIDED!!")
@@ -235,7 +278,12 @@ public final class QFile {
         .addFilter("(\\D)" + currentTimePrefix + "\\d{9}(\\D)", "$1!!UNIXTIMEMILLIS!!$2")
         .addFilter(userName, "!!{user.name}!!")
         .addFilter(operatorPattern, "\"$1_!!ELIDED!!\"")
-        .addFilter("Time taken: [0-9\\.]* seconds", "Time taken: !!ELIDED!! seconds");
+        .addFilter("(?i)Time taken: [0-9\\.]* sec", "Time taken: !!ELIDED!! sec")
+        .addFilter(" job(:?) job_\\w+([\\s\n])", " job$1 !!{jobId}}!!$2")
+        .addFilter("Ended Job = job_\\w+([\\s\n])", "Ended Job = !!{jobId}!!$1")
+        .addFilter(".*\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}.* map = .*\n", "")
+        .addFilter("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\s+", "")
+        .addFilter("maximum memory = \\d*", "maximum memory = !!ELIDED!!");
   }
 
   /**
@@ -246,9 +294,7 @@ public final class QFile {
     private File queryDirectory;
     private File logDirectory;
     private File resultsDirectory;
-    private String scratchDirectoryString;
-    private String warehouseDirectoryString;
-    private File hiveRootDirectory;
+    private boolean rewriteSourceTables;
 
     public QFileBuilder() {
     }
@@ -268,18 +314,8 @@ public final class QFile {
       return this;
     }
 
-    public QFileBuilder setScratchDirectoryString(String scratchDirectoryString) {
-      this.scratchDirectoryString = scratchDirectoryString;
-      return this;
-    }
-
-    public QFileBuilder setWarehouseDirectoryString(String warehouseDirectoryString) {
-      this.warehouseDirectoryString = warehouseDirectoryString;
-      return this;
-    }
-
-    public QFileBuilder setHiveRootDirectory(File hiveRootDirectory) {
-      this.hiveRootDirectory = hiveRootDirectory;
+    public QFileBuilder setRewriteSourceTables(boolean rewriteSourceTables) {
+      this.rewriteSourceTables = rewriteSourceTables;
       return this;
     }
 
@@ -293,15 +329,7 @@ public final class QFile {
       result.logFile = new File(logDirectory, name + ".q.beeline");
       result.beforeExecuteLogFile = new File(logDirectory, name + ".q.beforeExecute.log");
       result.afterExecuteLogFile = new File(logDirectory, name + ".q.afterExecute.log");
-      // These are the filters which are specific for the given QTest.
-      // Check staticFilterSet for common filters.
-      result.specificFilterSet = new RegexFilterSet()
-          .addFilter(scratchDirectoryString + "[\\w\\-/]+", "!!{hive.exec.scratchdir}!!")
-          .addFilter(warehouseDirectoryString, "!!{hive.metastore.warehouse.dir}!!")
-          .addFilter(resultsDirectory.getAbsolutePath(), "!!{expectedDirectory}!!")
-          .addFilter(logDirectory.getAbsolutePath(), "!!{outputDirectory}!!")
-          .addFilter(queryDirectory.getAbsolutePath(), "!!{qFileDirectory}!!")
-          .addFilter(hiveRootDirectory.getAbsolutePath(), "!!{hive.root}!!");
+      result.rewriteSourceTables = rewriteSourceTables;
       return result;
     }
   }
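
The table-name rewrite performed by replaceTableNames() above is a plain regex substitution over each command: an occurrence of a source table name bounded by whitespace on the left and whitespace, ';', a newline or ')' on the right is prefixed with "default.". A small standalone sketch of that behaviour, using the same pattern but a hypothetical two-element table list instead of QTestUtil.getSrcTables():

import java.util.Arrays;
import java.util.List;

public class TableRewriteSketch {
  public static void main(String[] args) {
    // hypothetical subset of the source tables; the real set comes from QTestUtil.getSrcTables()
    List<String> srcTables = Arrays.asList("src", "srcpart");
    String query = "select count(*) from src join srcpart on (src.key = srcpart.key);";
    for (String table : srcTables) {
      // same pattern as QFile.replaceTableNames(): the table name must be preceded by
      // whitespace and followed by whitespace, ';', a newline or ')'
      query = query.replaceAll("(?is)(\\s+)" + table + "([\\s;\\n\\)])",
          "$1default." + table + "$2");
    }
    System.out.println(query);
    // select count(*) from default.src join default.srcpart on (src.key = srcpart.key);
  }
}

Note that the qualified column references in the ON clause are left untouched, and, as the javadoc above warns, a query such as "select src from othertable" would also be rewritten because this is not a real parser; q files where that matters are expected to be excluded or run with -Dtest.rewrite.source.tables=false.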

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
index 760fde6..7244bf8 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/QFileBeeLineClient.java
@@ -16,9 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hive.beeline.qfile;
-
-import org.apache.hive.beeline.BeeLine;
+package org.apache.hive.beeline;
 
 import java.io.File;
 import java.io.IOException;
@@ -46,6 +44,7 @@ public class QFileBeeLineClient implements AutoCloseable {
           "!set shownestederrs true",
           "!set showwarnings true",
           "!set showelapsedtime false",
+          "!set trimscripts false",
           "!set maxwidth -1",
           "!connect " + jdbcUrl + " " + username + " " + password + " " + jdbcDriver
         });
@@ -87,13 +86,10 @@ public class QFileBeeLineClient implements AutoCloseable {
         qFile.getAfterExecuteLogFile());
   }
 
-  public void execute(QFile qFile) throws SQLException {
+  public void execute(QFile qFile) throws SQLException, IOException {
     beforeExecute(qFile);
-    execute(
-        new String[] {
-          "!run " + qFile.getInputFile().getAbsolutePath()
-        },
-        qFile.getRawOutputFile());
+    String[] commands = beeLine.getCommands(qFile.getInputFile());
+    execute(qFile.filterCommands(commands), qFile.getRawOutputFile());
     afterExecute(qFile);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java b/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
index fcd50ec..e05ac0a 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/qfile/package-info.java
@@ -17,6 +17,6 @@
  */
 
 /**
- * Package for the BeeLine specific QTest file classes.
+ * Package for the BeeLine specific QTest classes.
  */
-package org.apache.hive.beeline.qfile;
+package org.apache.hive.beeline;

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 79955e9..b0657f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -535,7 +535,7 @@ public final class Utilities {
   public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
     String useName = conf.get(INPUT_NAME);
     if (useName == null) {
-      useName = "mapreduce";
+      useName = "mapreduce:" + hiveScratchDir;
     }
     conf.set(INPUT_NAME, useName);
     setMapWork(conf, w.getMapWork(), hiveScratchDir, true);

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/drop_with_concurrency.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/drop_with_concurrency.q.out b/ql/src/test/results/clientpositive/beeline/drop_with_concurrency.q.out
index 385f9b7..2bffdf0 100644
--- a/ql/src/test/results/clientpositive/beeline/drop_with_concurrency.q.out
+++ b/ql/src/test/results/clientpositive/beeline/drop_with_concurrency.q.out
@@ -1,4 +1,3 @@
->>>  !run !!{qFileDirectory}!!/drop_with_concurrency.q
 >>>  set hive.lock.numretries=1;
 No rows affected 
 >>>  set hive.lock.sleep.between.retries=1;
@@ -9,54 +8,54 @@ No rows affected
 No rows affected 
 >>>  
 >>>  drop table if exists drop_with_concurrency_1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): drop table if exists drop_with_concurrency_1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): drop table if exists drop_with_concurrency_1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): drop table if exists drop_with_concurrency_1
-ERROR : PREHOOK: query: drop table if exists drop_with_concurrency_1
-ERROR : PREHOOK: type: DROPTABLE
+INFO  : PREHOOK: query: drop table if exists drop_with_concurrency_1
+INFO  : PREHOOK: type: DROPTABLE
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: drop table if exists drop_with_concurrency_1
-ERROR : POSTHOOK: type: DROPTABLE
+INFO  : POSTHOOK: query: drop table if exists drop_with_concurrency_1
+INFO  : POSTHOOK: type: DROPTABLE
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query drop table if exists drop_with_concurrency_1
 No rows affected 
 >>>  create table drop_with_concurrency_1 (c1 int);
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): create table drop_with_concurrency_1 (c1 int)
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table drop_with_concurrency_1 (c1 int)
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): create table drop_with_concurrency_1 (c1 int)
-ERROR : PREHOOK: query: create table drop_with_concurrency_1 (c1 int)
-ERROR : PREHOOK: type: CREATETABLE
-ERROR : PREHOOK: Output: database:drop_with_concurrency
-ERROR : PREHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
+INFO  : PREHOOK: query: create table drop_with_concurrency_1 (c1 int)
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:drop_with_concurrency
+INFO  : PREHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: create table drop_with_concurrency_1 (c1 int)
-ERROR : POSTHOOK: type: CREATETABLE
-ERROR : POSTHOOK: Output: database:drop_with_concurrency
-ERROR : POSTHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
+INFO  : POSTHOOK: query: create table drop_with_concurrency_1 (c1 int)
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:drop_with_concurrency
+INFO  : POSTHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query create table drop_with_concurrency_1 (c1 int)
 No rows affected 
 >>>  drop table drop_with_concurrency_1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): drop table drop_with_concurrency_1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): drop table drop_with_concurrency_1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): drop table drop_with_concurrency_1
-ERROR : PREHOOK: query: drop table drop_with_concurrency_1
-ERROR : PREHOOK: type: DROPTABLE
-ERROR : PREHOOK: Input: drop_with_concurrency@drop_with_concurrency_1
-ERROR : PREHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
+INFO  : PREHOOK: query: drop table drop_with_concurrency_1
+INFO  : PREHOOK: type: DROPTABLE
+INFO  : PREHOOK: Input: drop_with_concurrency@drop_with_concurrency_1
+INFO  : PREHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: drop table drop_with_concurrency_1
-ERROR : POSTHOOK: type: DROPTABLE
-ERROR : POSTHOOK: Input: drop_with_concurrency@drop_with_concurrency_1
-ERROR : POSTHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
+INFO  : POSTHOOK: query: drop table drop_with_concurrency_1
+INFO  : POSTHOOK: type: DROPTABLE
+INFO  : POSTHOOK: Input: drop_with_concurrency@drop_with_concurrency_1
+INFO  : POSTHOOK: Output: drop_with_concurrency@drop_with_concurrency_1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query drop table drop_with_concurrency_1
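
The masked tokens in the expected output above (queryId=(!!{queryId}!!), "Time taken: !!ELIDED!! seconds", file:/!!ELIDED!!) come from the regex filters registered in QFile.getFilterSet(). A minimal sketch applying two of those patterns with plain String.replaceAll (the real code funnels them through the RegexFilterSet helper):

public class OutputFilterSketch {
  public static void main(String[] args) {
    String raw = "INFO  : Completed executing command(queryId=hive_20170410144203_abc123); "
        + "Time taken: 0.123 seconds";
    String filtered = raw
        // same patterns as two of the filters added in QFile.getFilterSet()
        .replaceAll("\\(queryId=[^\\)]*\\)", "queryId=(!!{queryId}!!)")
        .replaceAll("(?i)Time taken: [0-9\\.]* sec", "Time taken: !!ELIDED!! sec");
    System.out.println(filtered);
    // INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
  }
}

Because the queryId replacement drops the opening parenthesis of "command(queryId=...)", the filtered files show the two words run together as "commandqueryId=(!!{queryId}!!)", which is exactly what appears throughout the .q.out output in this commit.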

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
index abc0fee..b440d9c 100644
--- a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
@@ -1,130 +1,129 @@
->>>  !run !!{qFileDirectory}!!/escape_comments.q
 >>>  create database escape_comments_db comment 'a\nb';
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): create database escape_comments_db comment 'a\nb'
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create database escape_comments_db comment 'a\nb'
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): create database escape_comments_db comment 'a\nb'
-ERROR : PREHOOK: query: create database escape_comments_db comment 'a\nb'
-ERROR : PREHOOK: type: CREATEDATABASE
-ERROR : PREHOOK: Output: database:escape_comments_db
+INFO  : PREHOOK: query: create database escape_comments_db comment 'a\nb'
+INFO  : PREHOOK: type: CREATEDATABASE
+INFO  : PREHOOK: Output: database:escape_comments_db
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: create database escape_comments_db comment 'a\nb'
-ERROR : POSTHOOK: type: CREATEDATABASE
-ERROR : POSTHOOK: Output: database:escape_comments_db
+INFO  : POSTHOOK: query: create database escape_comments_db comment 'a\nb'
+INFO  : POSTHOOK: type: CREATEDATABASE
+INFO  : POSTHOOK: Output: database:escape_comments_db
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query create database escape_comments_db comment 'a\nb'
 No rows affected 
 >>>  use escape_comments_db;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): use escape_comments_db
+INFO  : Compiling commandqueryId=(!!{queryId}!!): use escape_comments_db
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): use escape_comments_db
-ERROR : PREHOOK: query: use escape_comments_db
-ERROR : PREHOOK: type: SWITCHDATABASE
-ERROR : PREHOOK: Input: database:escape_comments_db
+INFO  : PREHOOK: query: use escape_comments_db
+INFO  : PREHOOK: type: SWITCHDATABASE
+INFO  : PREHOOK: Input: database:escape_comments_db
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: use escape_comments_db
-ERROR : POSTHOOK: type: SWITCHDATABASE
-ERROR : POSTHOOK: Input: database:escape_comments_db
+INFO  : POSTHOOK: query: use escape_comments_db
+INFO  : POSTHOOK: type: SWITCHDATABASE
+INFO  : POSTHOOK: Input: database:escape_comments_db
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query use escape_comments_db
 No rows affected 
->>>  create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+>>>  create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb');
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
-ERROR : PREHOOK: query: create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+INFO  : PREHOOK: query: create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
-ERROR : PREHOOK: type: CREATETABLE
-ERROR : PREHOOK: Output: database:escape_comments_db
-ERROR : PREHOOK: Output: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:escape_comments_db
+INFO  : PREHOOK: Output: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+INFO  : POSTHOOK: query: create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
-ERROR : POSTHOOK: type: CREATETABLE
-ERROR : POSTHOOK: Output: database:escape_comments_db
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:escape_comments_db
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
-DEBUG : Shutting down query create table escape_comments_tbl1 
-(col1 string comment 'a\nb\'\;') comment 'a\nb' 
+DEBUG : Shutting down query create table escape_comments_tbl1
+(col1 string comment 'a\nb\'\;') comment 'a\nb'
 partitioned by (p1 string comment 'a\nb')
 No rows affected 
->>>  create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+>>>  create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col1, type:string, comment:null)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
-INFO  : Executing commandqueryId=(!!{queryId}!!): create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+INFO  : Executing commandqueryId=(!!{queryId}!!): create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1
-ERROR : PREHOOK: query: create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+INFO  : PREHOOK: query: create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1
-ERROR : PREHOOK: type: CREATEVIEW
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
-ERROR : PREHOOK: Output: database:escape_comments_db
-ERROR : PREHOOK: Output: escape_comments_db@escape_comments_view1
+INFO  : PREHOOK: type: CREATEVIEW
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: Output: database:escape_comments_db
+INFO  : PREHOOK: Output: escape_comments_db@escape_comments_view1
 INFO  : Starting task [Stage-1:DDL] in serial mode
-ERROR : POSTHOOK: query: create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+INFO  : POSTHOOK: query: create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1
-ERROR : POSTHOOK: type: CREATEVIEW
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
-ERROR : POSTHOOK: Output: database:escape_comments_db
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_view1
-ERROR : POSTHOOK: Lineage: escape_comments_view1.col1 SIMPLE [(escape_comments_tbl1)escape_comments_tbl1.FieldSchema(name:col1, type:string, comment:a
+INFO  : POSTHOOK: type: CREATEVIEW
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: Output: database:escape_comments_db
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_view1
+INFO  : POSTHOOK: Lineage: escape_comments_view1.col1 SIMPLE [(escape_comments_tbl1)escape_comments_tbl1.FieldSchema(name:col1, type:string, comment:a
 b';), ]
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
-DEBUG : Shutting down query create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb' 
+DEBUG : Shutting down query create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
 as select col1 from escape_comments_tbl1
 No rows affected 
 >>>  create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb';
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-ERROR : PREHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-ERROR : PREHOOK: type: CREATEINDEX
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
+INFO  : PREHOOK: type: CREATEINDEX
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-ERROR : POSTHOOK: type: CREATEINDEX
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
+INFO  : POSTHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
+INFO  : POSTHOOK: type: CREATEINDEX
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
 No rows affected 
 >>>  
 >>>  describe database extended escape_comments_db;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe database extended escape_comments_db
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe database extended escape_comments_db
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:db_name, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer), FieldSchema(name:location, type:string, comment:from deserializer), FieldSchema(name:owner_name, type:string, comment:from deserializer), FieldSchema(name:owner_type, type:string, comment:from deserializer), FieldSchema(name:parameters, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe database extended escape_comments_db
-ERROR : PREHOOK: query: describe database extended escape_comments_db
-ERROR : PREHOOK: type: DESCDATABASE
-ERROR : PREHOOK: Input: database:escape_comments_db
+INFO  : PREHOOK: query: describe database extended escape_comments_db
+INFO  : PREHOOK: type: DESCDATABASE
+INFO  : PREHOOK: Input: database:escape_comments_db
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe database extended escape_comments_db
-ERROR : POSTHOOK: type: DESCDATABASE
-ERROR : POSTHOOK: Input: database:escape_comments_db
+INFO  : POSTHOOK: query: describe database extended escape_comments_db
+INFO  : POSTHOOK: type: DESCDATABASE
+INFO  : POSTHOOK: Input: database:escape_comments_db
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe database extended escape_comments_db
@@ -132,18 +131,18 @@ DEBUG : Shutting down query describe database extended escape_comments_db
 'escape_comments_db','a\nb','location/in/test','user','USER',''
 1 row selected 
 >>>  describe database escape_comments_db;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe database escape_comments_db
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe database escape_comments_db
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:db_name, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer), FieldSchema(name:location, type:string, comment:from deserializer), FieldSchema(name:owner_name, type:string, comment:from deserializer), FieldSchema(name:owner_type, type:string, comment:from deserializer), FieldSchema(name:parameters, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe database escape_comments_db
-ERROR : PREHOOK: query: describe database escape_comments_db
-ERROR : PREHOOK: type: DESCDATABASE
-ERROR : PREHOOK: Input: database:escape_comments_db
+INFO  : PREHOOK: query: describe database escape_comments_db
+INFO  : PREHOOK: type: DESCDATABASE
+INFO  : PREHOOK: Input: database:escape_comments_db
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe database escape_comments_db
-ERROR : POSTHOOK: type: DESCDATABASE
-ERROR : POSTHOOK: Input: database:escape_comments_db
+INFO  : POSTHOOK: query: describe database escape_comments_db
+INFO  : POSTHOOK: type: DESCDATABASE
+INFO  : POSTHOOK: Input: database:escape_comments_db
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe database escape_comments_db
@@ -151,18 +150,18 @@ DEBUG : Shutting down query describe database escape_comments_db
 'escape_comments_db','a\nb','location/in/test','user','USER',''
 1 row selected 
 >>>  show create table escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): show create table escape_comments_tbl1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): show create table escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:createtab_stmt, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): show create table escape_comments_tbl1
-ERROR : PREHOOK: query: show create table escape_comments_tbl1
-ERROR : PREHOOK: type: SHOW_CREATETABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: query: show create table escape_comments_tbl1
+INFO  : PREHOOK: type: SHOW_CREATETABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: show create table escape_comments_tbl1
-ERROR : POSTHOOK: type: SHOW_CREATETABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: query: show create table escape_comments_tbl1
+INFO  : POSTHOOK: type: SHOW_CREATETABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query show create table escape_comments_tbl1
@@ -179,23 +178,23 @@ DEBUG : Shutting down query show create table escape_comments_tbl1
 'OUTPUTFORMAT '
 '  'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat''
 'LOCATION'
-'  '!!{hive.metastore.warehouse.dir}!!/escape_comments_db.db/escape_comments_tbl1''
+'  'file:/!!ELIDED!!
 'TBLPROPERTIES ('
 '  'transient_lastDdlTime'='!!UNIXTIME!!')'
 15 rows selected 
 >>>  describe formatted escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe formatted escape_comments_tbl1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe formatted escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col_name, type:string, comment:from deserializer), FieldSchema(name:data_type, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe formatted escape_comments_tbl1
-ERROR : PREHOOK: query: describe formatted escape_comments_tbl1
-ERROR : PREHOOK: type: DESCTABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: query: describe formatted escape_comments_tbl1
+INFO  : PREHOOK: type: DESCTABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe formatted escape_comments_tbl1
-ERROR : POSTHOOK: type: DESCTABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: query: describe formatted escape_comments_tbl1
+INFO  : POSTHOOK: type: DESCTABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe formatted escape_comments_tbl1
@@ -215,7 +214,7 @@ DEBUG : Shutting down query describe formatted escape_comments_tbl1
 'CreateTime:         ','!!TIMESTAMP!!','NULL'
 'LastAccessTime:     ','UNKNOWN             ','NULL'
 'Retention:          ','0                   ','NULL'
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/escape_comments_db.db/escape_comments_tbl1','NULL'
+'Location:           ','file:/!!ELIDED!!
 'Table Type:         ','MANAGED_TABLE       ','NULL'
 'Table Parameters:','NULL','NULL'
 '','COLUMN_STATS_ACCURATE','{\"BASIC_STATS\":\"true\"}'
@@ -239,18 +238,18 @@ DEBUG : Shutting down query describe formatted escape_comments_tbl1
 '','serialization.format','1                   '
 37 rows selected 
 >>>  describe pretty escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe pretty escape_comments_tbl1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe pretty escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col_name, type:string, comment:from deserializer), FieldSchema(name:data_type, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe pretty escape_comments_tbl1
-ERROR : PREHOOK: query: describe pretty escape_comments_tbl1
-ERROR : PREHOOK: type: DESCTABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: query: describe pretty escape_comments_tbl1
+INFO  : PREHOOK: type: DESCTABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe pretty escape_comments_tbl1
-ERROR : POSTHOOK: type: DESCTABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: query: describe pretty escape_comments_tbl1
+INFO  : POSTHOOK: type: DESCTABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe pretty escape_comments_tbl1
@@ -269,18 +268,18 @@ DEBUG : Shutting down query describe pretty escape_comments_tbl1
 '         ','              ','b'
 12 rows selected 
 >>>  describe escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe escape_comments_tbl1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col_name, type:string, comment:from deserializer), FieldSchema(name:data_type, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe escape_comments_tbl1
-ERROR : PREHOOK: query: describe escape_comments_tbl1
-ERROR : PREHOOK: type: DESCTABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: query: describe escape_comments_tbl1
+INFO  : PREHOOK: type: DESCTABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe escape_comments_tbl1
-ERROR : POSTHOOK: type: DESCTABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: query: describe escape_comments_tbl1
+INFO  : POSTHOOK: type: DESCTABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe escape_comments_tbl1
@@ -294,18 +293,18 @@ DEBUG : Shutting down query describe escape_comments_tbl1
 'p1','string','a\nb'
 7 rows selected 
 >>>  show create table escape_comments_view1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): show create table escape_comments_view1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): show create table escape_comments_view1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:createtab_stmt, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): show create table escape_comments_view1
-ERROR : PREHOOK: query: show create table escape_comments_view1
-ERROR : PREHOOK: type: SHOW_CREATETABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_view1
+INFO  : PREHOOK: query: show create table escape_comments_view1
+INFO  : PREHOOK: type: SHOW_CREATETABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_view1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: show create table escape_comments_view1
-ERROR : POSTHOOK: type: SHOW_CREATETABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_view1
+INFO  : POSTHOOK: query: show create table escape_comments_view1
+INFO  : POSTHOOK: type: SHOW_CREATETABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_view1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query show create table escape_comments_view1
@@ -313,18 +312,18 @@ DEBUG : Shutting down query show create table escape_comments_view1
 'CREATE VIEW `escape_comments_view1` AS SELECT `col1` AS `col1` FROM (select `escape_comments_tbl1`.`col1` from `escape_comments_db`.`escape_comments_tbl1`) `escape_comments_db.escape_comments_view1`'
 1 row selected 
 >>>  describe formatted escape_comments_view1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): describe formatted escape_comments_view1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): describe formatted escape_comments_view1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:col_name, type:string, comment:from deserializer), FieldSchema(name:data_type, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): describe formatted escape_comments_view1
-ERROR : PREHOOK: query: describe formatted escape_comments_view1
-ERROR : PREHOOK: type: DESCTABLE
-ERROR : PREHOOK: Input: escape_comments_db@escape_comments_view1
+INFO  : PREHOOK: query: describe formatted escape_comments_view1
+INFO  : PREHOOK: type: DESCTABLE
+INFO  : PREHOOK: Input: escape_comments_db@escape_comments_view1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: describe formatted escape_comments_view1
-ERROR : POSTHOOK: type: DESCTABLE
-ERROR : POSTHOOK: Input: escape_comments_db@escape_comments_view1
+INFO  : POSTHOOK: query: describe formatted escape_comments_view1
+INFO  : POSTHOOK: type: DESCTABLE
+INFO  : POSTHOOK: Input: escape_comments_db@escape_comments_view1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query describe formatted escape_comments_view1
@@ -359,16 +358,16 @@ DEBUG : Shutting down query describe formatted escape_comments_view1
 'View Rewrite Enabled:','No                  ','NULL'
 28 rows selected 
 >>>  show formatted index on escape_comments_tbl1;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): show formatted index on escape_comments_tbl1
+INFO  : Compiling commandqueryId=(!!{queryId}!!): show formatted index on escape_comments_tbl1
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:idx_name, type:string, comment:from deserializer), FieldSchema(name:tab_name, type:string, comment:from deserializer), FieldSchema(name:col_names, type:string, comment:from deserializer), FieldSchema(name:idx_tab_name, type:string, comment:from deserializer), FieldSchema(name:idx_type, type:string, comment:from deserializer), FieldSchema(name:comment, type:string, comment:from deserializer)], properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): show formatted index on escape_comments_tbl1
-ERROR : PREHOOK: query: show formatted index on escape_comments_tbl1
-ERROR : PREHOOK: type: SHOWINDEXES
+INFO  : PREHOOK: query: show formatted index on escape_comments_tbl1
+INFO  : PREHOOK: type: SHOWINDEXES
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: show formatted index on escape_comments_tbl1
-ERROR : POSTHOOK: type: SHOWINDEXES
+INFO  : POSTHOOK: query: show formatted index on escape_comments_tbl1
+INFO  : POSTHOOK: type: SHOWINDEXES
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query show formatted index on escape_comments_tbl1
@@ -380,26 +379,26 @@ DEBUG : Shutting down query show formatted index on escape_comments_tbl1
 4 rows selected 
 >>>  
 >>>  drop database escape_comments_db cascade;
-DEBUG : INFO  : Compiling commandqueryId=(!!{queryId}!!): drop database escape_comments_db cascade
+INFO  : Compiling commandqueryId=(!!{queryId}!!): drop database escape_comments_db cascade
 INFO  : Semantic Analysis Completed
 INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
 INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : Executing commandqueryId=(!!{queryId}!!): drop database escape_comments_db cascade
-ERROR : PREHOOK: query: drop database escape_comments_db cascade
-ERROR : PREHOOK: type: DROPDATABASE
-ERROR : PREHOOK: Input: database:escape_comments_db
-ERROR : PREHOOK: Output: database:escape_comments_db
-ERROR : PREHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
-ERROR : PREHOOK: Output: escape_comments_db@escape_comments_tbl1
-ERROR : PREHOOK: Output: escape_comments_db@escape_comments_view1
+INFO  : PREHOOK: query: drop database escape_comments_db cascade
+INFO  : PREHOOK: type: DROPDATABASE
+INFO  : PREHOOK: Input: database:escape_comments_db
+INFO  : PREHOOK: Output: database:escape_comments_db
+INFO  : PREHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
+INFO  : PREHOOK: Output: escape_comments_db@escape_comments_tbl1
+INFO  : PREHOOK: Output: escape_comments_db@escape_comments_view1
 INFO  : Starting task [Stage-0:DDL] in serial mode
-ERROR : POSTHOOK: query: drop database escape_comments_db cascade
-ERROR : POSTHOOK: type: DROPDATABASE
-ERROR : POSTHOOK: Input: database:escape_comments_db
-ERROR : POSTHOOK: Output: database:escape_comments_db
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
-ERROR : POSTHOOK: Output: escape_comments_db@escape_comments_view1
+INFO  : POSTHOOK: query: drop database escape_comments_db cascade
+INFO  : POSTHOOK: type: DROPDATABASE
+INFO  : POSTHOOK: Input: database:escape_comments_db
+INFO  : POSTHOOK: Output: database:escape_comments_db
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
+INFO  : POSTHOOK: Output: escape_comments_db@escape_comments_view1
 INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
 INFO  : OK
 DEBUG : Shutting down query drop database escape_comments_db cascade
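
For context on the escape_comments.q.out hunks above (the diff itself only flips log levels from DEBUG/ERROR to INFO): the outputs shown correspond to DDL along the following lines. This is a reconstructed sketch, not text taken from the patch; the comment string 'a\nb', the column and partition names, and the index name index2 are inferred from the DESCRIBE, SHOW CREATE TABLE and DROP DATABASE CASCADE outputs above.

  -- reconstructed sketch (assumed, not quoted from the patch) of the objects
  -- that the escape_comments output refers to
  CREATE DATABASE escape_comments_db COMMENT 'a\nb';
  USE escape_comments_db;
  CREATE TABLE escape_comments_tbl1
    (col1 STRING COMMENT 'a\nb') COMMENT 'a\nb'
    PARTITIONED BY (p1 STRING COMMENT 'a\nb');
  CREATE VIEW escape_comments_view1 COMMENT 'a\nb'
    AS SELECT col1 FROM escape_comments_tbl1;
  CREATE INDEX index2 ON TABLE escape_comments_tbl1 (col1)
    AS 'COMPACT' WITH DEFERRED REBUILD COMMENT 'a\nb';

The visible point of these outputs is that the embedded \n stays as the literal two characters in DESCRIBE and SHOW CREATE TABLE results instead of being expanded into a real newline.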


[3/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
new file mode 100644
index 0000000..b8a06dc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_16.q.out
@@ -0,0 +1,254 @@
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  set hive.merge.mapfiles=false;
+No rows affected 
+>>>  set hive.merge.mapredfiles=false; 
+No rows affected 
+>>>  
+>>>  -- Create bucketed and sorted tables
+>>>  CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_16
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_16
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_16
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_16
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+No rows affected 
+>>>  
+>>>  FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : PREHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : PREHOOK: Output: smb_mapjoin_16@test_table2
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 2
+INFO  : Launching Job 1 out of 2
+INFO  : Starting task [Stage-2:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_16.test_table1 from file:/!!ELIDED!!
+INFO  : Launching Job 2 out of 2
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-3:STATS] in serial mode
+INFO  : Starting task [Stage-1:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_16.test_table2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-5:STATS] in serial mode
+INFO  : POSTHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table1
+INFO  : POSTHOOK: Output: smb_mapjoin_16@test_table2
+INFO  : POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-2:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+No rows affected 
+>>>  
+>>>  -- Mapjoin followed by an aggregation should be performed in a single MR job
+>>>  EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: query: EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                Group By Operator'
+'                  aggregations: count()'
+'                  mode: hash'
+'                  outputColumnNames: _col0'
+'                  Reduce Output Operator'
+'                    sort order: '
+'                    value expressions: _col0 (type: bigint)'
+'      Reduce Operator Tree:'
+'        Group By Operator'
+'          aggregations: count(VALUE._col0)'
+'          mode: mergepartial'
+'          outputColumnNames: _col0'
+'          File Output Operator'
+'            compressed: false'
+'            table:'
+'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+45 rows selected 
+>>>  SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c1, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_16@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_16@test_table2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_16@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_16@test_table2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+'_c1'
+'1028'
+1 row selected 
+>>>  !record
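
Before moving on to smb_mapjoin_2.q.out: the smb_mapjoin_16.q.out output above boils down to a short recipe, sketched here in plain HiveQL (table names shortened; this is an illustration distilled from the output, not text from the patch). Both join sides are bucketed and sorted on the join key into the same number of buckets, and with the bucket-map-join settings enabled the hinted join compiles into the single-stage Sorted Merge Bucket Map Join plan shown in the EXPLAIN above, so the count(*) aggregation runs in the same MapReduce job as the join.

  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

  -- both tables bucketed and sorted on the join key, same bucket count
  CREATE TABLE t1 (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
  CREATE TABLE t2 (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

  -- populate both tables (the test inserts from default.src with a single
  -- reducer so each bucket file is written sorted), then run the hinted join:
  SELECT /*+ MAPJOIN(b) */ count(*)
  FROM t1 a JOIN t2 b ON a.key = b.key;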

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
new file mode 100644
index 0000000..22a2d6a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
@@ -0,0 +1,955 @@
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_2
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_2
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_2.smb_bucket_3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_2@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+No rows affected 
+>>>   
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  -- SORT_QUERY_RESULTS
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+9 rows selected 
+>>>  
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'4','val_4','4','val_4'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_2@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','4','val_4'
+'5','val_5','NULL','NULL'
+'10','val_10','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+9 rows selected 
+>>>  
+>>>   
+>>>  
+>>>  
+>>>  
+>>>  !record
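
The golden files added by this commit all exercise the same sorted-merge bucket map join (SMB) pattern: create tables that are CLUSTERED BY and SORTED BY the join key, load the prepared RC files, switch on the bucket-map-join settings, and then EXPLAIN and run hinted joins against the default-database tables. A condensed HiveQL sketch of that pattern, taken from the smb_mapjoin_3.q.out file in part [2/7] below (the inpath values are the test's own data files and are reproduced here only for orientation, not as new content):

    set hive.optimize.bucketmapjoin = true;
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

    -- bucketed and sorted on the join key, stored as RCFile
    create table smb_bucket_2(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
    create table smb_bucket_3(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;

    load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
    load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;

    -- the /*+mapjoin(a)*/ hint picks the hash side; with the settings above the
    -- plan contains a Sorted Merge Bucket Map Join Operator rather than a shuffle join
    explain
    select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
    select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;

Because the join runs map-side, every job in these outputs logs "Number of reduce tasks is set to 0 since there's no reduce operator".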


[2/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
new file mode 100644
index 0000000..6c9b8e4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
@@ -0,0 +1,950 @@
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  -- SORT_QUERY_RESULTS
+>>>  
+>>>  
+>>>  
+>>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_3
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_3
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_3.smb_bucket_3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_3@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+No rows affected 
+>>>  
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>   
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+8 rows selected 
+>>>  
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+2 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+6 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : PREHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_2
+INFO  : POSTHOOK: Input: smb_mapjoin_3@smb_bucket_3
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','4','val_4'
+'NULL','NULL','10','val_10'
+'NULL','NULL','17','val_17'
+'NULL','NULL','19','val_19'
+'20','val_20','20','val_20'
+'23','val_23','23','val_23'
+'25','val_25','NULL','NULL'
+'30','val_30','NULL','NULL'
+8 rows selected 
+>>>  
+>>>   
+>>>  
+>>>  
+>>>  
+>>>  !record
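A note on what the transcript above exercises: in a sorted-merge bucket map join (the "Sorted Merge Bucket Map Join Operator" in the plans), both tables are bucketed and sorted on the join key into the same number of buckets, so each mapper merges one bucket of table a against the matching bucket of table b without a shuffle or an in-memory hash table; that is why every join here runs as a single map-only job ("Number of reduce tasks is set to 0 since there's no reduce operator"). A minimal, hypothetical HiveQL sketch of the pattern follows; the table names are illustrative only and are not part of the committed test output:

    -- both sides bucketed and sorted on the join key into the same bucket count
    CREATE TABLE orders_b (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE lookup_b (key INT, value STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    set hive.optimize.bucketmapjoin = true;
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

    -- the MAPJOIN hint asks Hive to keep b on the map side; with the settings
    -- above the plan should show a Sorted Merge Bucket Map Join Operator
    SELECT /*+ MAPJOIN(b) */ *
    FROM orders_b a
    LEFT OUTER JOIN lookup_b b ON a.key = b.key;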


[4/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
new file mode 100644
index 0000000..98bf25e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
@@ -0,0 +1,822 @@
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  
+>>>  
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  set hive.merge.mapfiles=false;
+No rows affected 
+>>>  set hive.merge.mapredfiles=false; 
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted
+>>>  
+>>>  -- Create two bucketed and sorted tables
+>>>  CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_12
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_12
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_12
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_12
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  
+>>>  FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *
+INFO  : PREHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table1@ds=1
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table2@ds=1
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table2@ds=2
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table2@ds=3
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 4
+INFO  : Launching Job 1 out of 4
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table1 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Launching Job 2 out of 4
+INFO  : Starting task [Stage-6:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 3 out of 4
+INFO  : Starting task [Stage-8:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 4 out of 4
+INFO  : Starting task [Stage-10:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-5:STATS] in serial mode
+INFO  : Starting task [Stage-1:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table2 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table2 partition (ds=2) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-3:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table2 partition (ds=3) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-7:STATS] in serial mode
+INFO  : Starting task [Stage-9:STATS] in serial mode
+INFO  : Starting task [Stage-11:STATS] in serial mode
+INFO  : POSTHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table1@ds=1
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table2@ds=1
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table2@ds=2
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table2@ds=3
+INFO  : POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2 PARTITION(ds=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-6:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-8:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-10:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query FROM default.src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *
+No rows affected 
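The statement above uses Hive's multi-insert form: a single scan of default.src feeds four INSERT OVERWRITE branches, which the log shows compiling into four MapReduce jobs ("Total jobs = 4"), one per branch. A stripped-down sketch of the shape, with hypothetical table names:

    FROM source_table
    INSERT OVERWRITE TABLE target_a PARTITION (ds = '1') SELECT key, value
    INSERT OVERWRITE TABLE target_b PARTITION (ds = '1') SELECT key, value
    INSERT OVERWRITE TABLE target_b PARTITION (ds = '2') SELECT key, value;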
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  -- Create a bucketed table
+>>>  CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_12
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_12
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+No rows affected 
+>>>  
+>>>  -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced
+>>>  EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : PREHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-4:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+'  Stage-2 depends on stages: Stage-0'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col7'
+'                Position of Big Table: 0'
+'                BucketMapJoin: true'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col7 (type: string)'
+'                  outputColumnNames: _col0, _col1'
+'                  File Output Operator'
+'                    compressed: false'
+'                    GlobalTableId: 1'
+'                    directory: file:/!!ELIDED!!
+'                    NumFilesPerFileSink: 1'
+'                    Static Partition Specification: ds=1/'
+'                    Stats Publishing Key Prefix: file:/!!ELIDED!!
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.TextInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                        properties:'
+'                          SORTBUCKETCOLSPREFIX TRUE'
+'                          bucket_count 16'
+'                          bucket_field_name key'
+'                          column.name.delimiter ,'
+'                          columns key,value'
+'                          columns.comments '
+'                          columns.types int:string'
+'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                          location file:/!!ELIDED!!
+'                          name smb_mapjoin_12.test_table3'
+'                          partition_columns ds'
+'                          partition_columns.types string'
+'                          serialization.ddl struct test_table3 { i32 key, string value}'
+'                          serialization.format 1'
+'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                          transient_lastDdlTime !!UNIXTIME!!'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                        name: smb_mapjoin_12.test_table3'
+'                    TotalFiles: 1'
+'                    GatherStats: true'
+'                    MultiFileSpray: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [a]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: ds=1'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            partition values:'
+'              ds 1'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_12.test_table1'
+'              numFiles 16'
+'              numRows 500'
+'              partition_columns ds'
+'              partition_columns.types string'
+'              rawDataSize 5312'
+'              serialization.ddl struct test_table1 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 5812'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_12.test_table1'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table1 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_12.test_table1'
+'            name: smb_mapjoin_12.test_table1'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_12.db/test_table1/ds=1 [a]'
+''
+'  Stage: Stage-0'
+'    Move Operator'
+'      tables:'
+'          partition:'
+'            ds 1'
+'          replace: true'
+'          source: file:/!!ELIDED!!
+'          table:'
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_12.test_table3'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_12.test_table3'
+''
+'  Stage: Stage-2'
+'    Stats-Aggr Operator'
+'      Stats Aggregation Key Prefix: file:/!!ELIDED!!
+''
+157 rows selected 
+>>>  
+>>>  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table2
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table2@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table2@ds=2
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table2@ds=3
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table3@ds=1
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table3 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table2
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table2@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table2@ds=2
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table2@ds=3
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table3@ds=1
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
+No rows affected 
+>>>  
+>>>  -- Join data from a sampled bucket to verify the data is bucketed
+>>>  SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c0, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3@ds=1
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 2
+INFO  : Launching Job 1 out of 2
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks not specified. Estimated from input data size: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:2
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 2 out of 2
+INFO  : Starting task [Stage-2:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3@ds=1
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-2:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+'_c0'
+'879'
+1 row selected 
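The sampled join above is the bucketing check: TABLESAMPLE (BUCKET 2 OUT OF 16) on a table declared with 16 buckets restricts the scan to the second bucket, so joining bucket 2 of test_table3 against bucket 2 of test_table1 only finds matches if rows with the same key landed in the same bucket in both tables. As a hypothetical stand-alone sketch of the sampling syntax (the table name is illustrative):

    -- read only bucket 2 of a table created with INTO 16 BUCKETS
    SELECT count(*)
    FROM some_bucketed_table TABLESAMPLE (BUCKET 2 OUT OF 16) t
    WHERE t.ds = '1';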
+>>>  
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  
+>>>  -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted
+>>>  explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: query: explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-4:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain extended
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+'  Stage-2 depends on stages: Stage-0'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 3084 Data size: 32904 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 3084 Data size: 32904 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col7'
+'                Position of Big Table: 0'
+'                BucketMapJoin: true'
+'                Select Operator'
+'                  expressions: _col0 (type: int), concat(_col1, _col7) (type: string)'
+'                  outputColumnNames: _col0, _col1'
+'                  File Output Operator'
+'                    compressed: false'
+'                    GlobalTableId: 1'
+'                    directory: file:/!!ELIDED!!
+'                    NumFilesPerFileSink: 1'
+'                    Static Partition Specification: ds=2/'
+'                    Stats Publishing Key Prefix: file:/!!ELIDED!!
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.TextInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                        properties:'
+'                          SORTBUCKETCOLSPREFIX TRUE'
+'                          bucket_count 16'
+'                          bucket_field_name key'
+'                          column.name.delimiter ,'
+'                          columns key,value'
+'                          columns.comments '
+'                          columns.types int:string'
+'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                          location file:/!!ELIDED!!
+'                          name smb_mapjoin_12.test_table3'
+'                          partition_columns ds'
+'                          partition_columns.types string'
+'                          serialization.ddl struct test_table3 { i32 key, string value}'
+'                          serialization.format 1'
+'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                          transient_lastDdlTime !!UNIXTIME!!'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                        name: smb_mapjoin_12.test_table3'
+'                    TotalFiles: 1'
+'                    GatherStats: true'
+'                    MultiFileSpray: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [a]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: ds=1'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            partition values:'
+'              ds 1'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_12.test_table3'
+'              numFiles 16'
+'              numRows 3084'
+'              partition_columns ds'
+'              partition_columns.types string'
+'              rawDataSize 32904'
+'              serialization.ddl struct test_table3 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 35988'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_12.test_table3'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_12.test_table3'
+'            name: smb_mapjoin_12.test_table3'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_12.db/test_table3/ds=1 [a]'
+''
+'  Stage: Stage-0'
+'    Move Operator'
+'      tables:'
+'          partition:'
+'            ds 2'
+'          replace: true'
+'          source: file:/!!ELIDED!!
+'          table:'
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_12.test_table3'
+'                partition_columns ds'
+'                partition_columns.types string'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_12.test_table3'
+''
+'  Stage: Stage-2'
+'    Stats-Aggr Operator'
+'      Stats Aggregation Key Prefix: file:/!!ELIDED!!
+''
+157 rows selected 
+>>>  
+>>>  INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:_c2, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3@ds=1
+INFO  : PREHOOK: Output: smb_mapjoin_12@test_table3@ds=2
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_12.test_table3 partition (ds=2) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:STATS] in serial mode
+INFO  : POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table1@ds=1
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3@ds=1
+INFO  : POSTHOOK: Output: smb_mapjoin_12@test_table3@ds=2
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=2).key SIMPLE [(test_table3)a.FieldSchema(name:key, type:int, comment:null), ]
+INFO  : POSTHOOK: Lineage: test_table3 PARTITION(ds=2).value EXPRESSION [(test_table3)a.FieldSchema(name:value, type:string, comment:null), (test_table1)b.FieldSchema(name:value, type:string, comment:null), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 
+SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
+No rows affected 
+>>>  
+>>>  SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2';
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_c0, type:bigint, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'
+INFO  : PREHOOK: query: SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_12@test_table3@ds=2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_12@test_table3@ds=2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'
+'_c0'
+'879'
+1 row selected 
+>>>  !record
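The smb_mapjoin_12.q.out fragment above ends with an INSERT OVERWRITE into a new partition followed by a single-bucket count: TABLESAMPLE (BUCKET 2 OUT OF 16) reads only one of the sixteen bucket files of the ds='2' partition, which gives a quick check that the insert produced correctly bucketed output. For reference, the shape of test_table3 can be reconstructed from the table properties printed in the plan (columns key,value of int:string, partition column ds, 16 buckets on key, sorted); the following is only an illustrative sketch, the actual DDL appears earlier in the file:

  -- Illustrative reconstruction of the test_table3 layout referenced above
  CREATE TABLE test_table3 (key INT, value STRING)
  PARTITIONED BY (ds STRING)
  CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS;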

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
new file mode 100644
index 0000000..d303900
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out
@@ -0,0 +1,687 @@
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  
+>>>  set hive.exec.reducers.max = 1;
+No rows affected 
+>>>  set hive.merge.mapfiles=false;
+No rows affected 
+>>>  set hive.merge.mapredfiles=false; 
+No rows affected 
+>>>  
+>>>  -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names
+>>>  
+>>>  -- Create bucketed and sorted tables
+>>>  CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_13
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_13
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_13
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_13
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_13
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_13
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+No rows affected 
+>>>  CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_13
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table4
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_13
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table4
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+No rows affected 
+>>>  
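Before the multi-insert below, it helps to condense the four table layouts just created, since they determine which of the later joins can use a sort merge bucket join (this summary is taken directly from the DDLs above):

  -- test_table1: CLUSTERED BY (key)        SORTED BY (key ASC)            INTO 16 BUCKETS
  -- test_table2: CLUSTERED BY (value)      SORTED BY (value ASC)          INTO 16 BUCKETS
  -- test_table3: CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
  -- test_table4: CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS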
+>>>  FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:_col0, type:int, comment:null), FieldSchema(name:_col1, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *
+INFO  : PREHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: default@src
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table1
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table2
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table3
+INFO  : PREHOOK: Output: smb_mapjoin_13@test_table4
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 4
+INFO  : Launching Job 1 out of 4
+INFO  : Starting task [Stage-4:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_13.test_table1 from file:/!!ELIDED!!
+INFO  : Launching Job 2 out of 4
+INFO  : Starting task [Stage-6:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 3 out of 4
+INFO  : Starting task [Stage-8:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Launching Job 4 out of 4
+INFO  : Starting task [Stage-10:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : Starting task [Stage-5:STATS] in serial mode
+INFO  : Starting task [Stage-1:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_13.test_table2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-2:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_13.test_table3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-3:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_13.test_table4 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-7:STATS] in serial mode
+INFO  : Starting task [Stage-9:STATS] in serial mode
+INFO  : Starting task [Stage-11:STATS] in serial mode
+INFO  : POSTHOOK: query: FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: default@src
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table1
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table2
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table3
+INFO  : POSTHOOK: Output: smb_mapjoin_13@test_table4
+INFO  : POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table2.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+INFO  : POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-4:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-6:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-8:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Stage-Stage-10:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query FROM default.src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table3 SELECT *
+INSERT OVERWRITE TABLE test_table4 SELECT *
+No rows affected 
+>>>  
+>>>  -- Join data from 2 tables on their respective sorted columns (one each, with different names) and
+>>>  -- verify sort merge join is used
+>>>  EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: query: EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 value (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Position of Big Table: 0'
+'                BucketMapJoin: true'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  Reduce Output Operator'
+'                    key expressions: _col0 (type: int)'
+'                    null sort order: a'
+'                    sort order: +'
+'                    tag: -1'
+'                    TopN: 10'
+'                    TopN Hash Memory Usage: 0.1'
+'                    value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)'
+'                    auto parallelism: false'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [a]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: test_table1'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              SORTBUCKETCOLSPREFIX TRUE'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_13.test_table1'
+'              numFiles 16'
+'              numRows 500'
+'              rawDataSize 5312'
+'              serialization.ddl struct test_table1 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 5812'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_13.test_table1'
+'                numFiles 16'
+'                numRows 500'
+'                rawDataSize 5312'
+'                serialization.ddl struct test_table1 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                totalSize 5812'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_13.test_table1'
+'            name: smb_mapjoin_13.test_table1'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_13.db/test_table1 [a]'
+'      Needs Tagging: false'
+'      Reduce Operator Tree:'
+'        Select Operator'
+'          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: string)'
+'          outputColumnNames: _col0, _col1, _col2, _col3'
+'          Limit'
+'            Number of rows: 10'
+'            File Output Operator'
+'              compressed: false'
+'              GlobalTableId: 0'
+'              directory: file:/!!ELIDED!!
+'              NumFilesPerFileSink: 1'
+'              Stats Publishing Key Prefix: file:/!!ELIDED!!
+'              table:'
+'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                  properties:'
+'                    columns _col0,_col1,_col2,_col3'
+'                    columns.types int:string:int:string'
+'                    escape.delim \'
+'                    hive.serialization.extend.additional.nesting.levels true'
+'                    serialization.escape.crlf true'
+'                    serialization.format 1'
+'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              TotalFiles: 1'
+'              GatherStats: false'
+'              MultiFileSpray: false'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: 10'
+'      Processor Tree:'
+'        ListSink'
+''
+131 rows selected 
+>>>  
+>>>  SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.value, type:int, comment:null), FieldSchema(name:b.key, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_13@test_table1
+INFO  : PREHOOK: Input: smb_mapjoin_13@test_table2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_13@test_table1
+INFO  : POSTHOOK: Input: smb_mapjoin_13@test_table2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+'a.key','a.value','b.value','b.key'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'0','val_0','0','val_0'
+'2','val_2','2','val_2'
+10 rows selected 
+>>>  
+>>>  -- Join data from 2 tables on their respective columns (two each, with the same names but sorted
+>>>  -- with different priorities) and verify sort merge join is not used
+>>>  EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: query: EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-4:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query EXPLAIN EXTENDED
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-3 is a root stage'
+'  Stage-1 depends on stages: Stage-3'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-3'
+'    Map Reduce Local Work'
+'      Alias -> Map Local Tables:'
+'        b '
+'          Fetch Operator'
+'            limit: -1'
+'      Alias -> Map Local Operator Tree:'
+'        b '
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: UDFToDouble(value) is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              HashTable Sink Operator'
+'                keys:'
+'                  0 UDFToDouble(key) (type: double)'
+'                  1 UDFToDouble(value) (type: double)'
+'                Position of Big Table: 0'
+''
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'            GatherStats: false'
+'            Filter Operator'
+'              isSamplingPred: false'
+'              predicate: UDFToDouble(key) is not null (type: boolean)'
+'              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE'
+'              Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 UDFToDouble(key) (type: double)'
+'                  1 UDFToDouble(value) (type: double)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Position of Big Table: 0'
+'                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE'
+'                  Reduce Output Operator'
+'                    key expressions: _col0 (type: int)'
+'                    null sort order: a'
+'                    sort order: +'
+'                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE'
+'                    tag: -1'
+'                    TopN: 10'
+'                    TopN Hash Memory Usage: 0.1'
+'                    value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)'
+'                    auto parallelism: false'
+'      Local Work:'
+'        Map Reduce Local Work'
+'      Path -> Alias:'
+'        file:/!!ELIDED!! [a]'
+'      Path -> Partition:'
+'        file:/!!ELIDED!! '
+'          Partition'
+'            base file name: test_table3'
+'            input format: org.apache.hadoop.mapred.TextInputFormat'
+'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'            properties:'
+'              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'              SORTBUCKETCOLSPREFIX TRUE'
+'              bucket_count 16'
+'              bucket_field_name key'
+'              column.name.delimiter ,'
+'              columns key,value'
+'              columns.comments '
+'              columns.types int:string'
+'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              location file:/!!ELIDED!!
+'              name smb_mapjoin_13.test_table3'
+'              numFiles 16'
+'              numRows 500'
+'              rawDataSize 5312'
+'              serialization.ddl struct test_table3 { i32 key, string value}'
+'              serialization.format 1'
+'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              totalSize 5812'
+'              transient_lastDdlTime !!UNIXTIME!!'
+'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'          '
+'              input format: org.apache.hadoop.mapred.TextInputFormat'
+'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'              properties:'
+'                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}'
+'                SORTBUCKETCOLSPREFIX TRUE'
+'                bucket_count 16'
+'                bucket_field_name key'
+'                column.name.delimiter ,'
+'                columns key,value'
+'                columns.comments '
+'                columns.types int:string'
+'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
+'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+'                location file:/!!ELIDED!!
+'                name smb_mapjoin_13.test_table3'
+'                numFiles 16'
+'                numRows 500'
+'                rawDataSize 5312'
+'                serialization.ddl struct test_table3 { i32 key, string value}'
+'                serialization.format 1'
+'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                totalSize 5812'
+'                transient_lastDdlTime !!UNIXTIME!!'
+'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              name: smb_mapjoin_13.test_table3'
+'            name: smb_mapjoin_13.test_table3'
+'      Truncated Path -> Alias:'
+'        /smb_mapjoin_13.db/test_table3 [a]'
+'      Needs Tagging: false'
+'      Reduce Operator Tree:'
+'        Select Operator'
+'          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: string)'
+'          outputColumnNames: _col0, _col1, _col2, _col3'
+'          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE'
+'          Limit'
+'            Number of rows: 10'
+'            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE'
+'            File Output Operator'
+'              compressed: false'
+'              GlobalTableId: 0'
+'              directory: file:/!!ELIDED!!
+'              NumFilesPerFileSink: 1'
+'              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE'
+'              Stats Publishing Key Prefix: file:/!!ELIDED!!
+'              table:'
+'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                  properties:'
+'                    columns _col0,_col1,_col2,_col3'
+'                    columns.types int:string:int:string'
+'                    escape.delim \'
+'                    hive.serialization.extend.additional.nesting.levels true'
+'                    serialization.escape.crlf true'
+'                    serialization.format 1'
+'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+'              TotalFiles: 1'
+'              GatherStats: false'
+'              MultiFileSpray: false'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: 10'
+'      Processor Tree:'
+'        ListSink'
+''
+161 rows selected 
+>>>  
+>>>  SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_13@test_table3
+INFO  : PREHOOK: Input: smb_mapjoin_13@test_table4
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Starting task [Stage-3:MAPREDLOCAL] in serial mode
+INFO  : Starting to launch local task to process map join;	maximum memory = !!ELIDED!!
+INFO  : End of local task; Time taken: !!ELIDED!! sec.
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks determined at compile time: 1
+INFO  : In order to change the average load for a reducer (in bytes):
+INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+INFO  : In order to limit the maximum number of reducers:
+INFO  :   set hive.exec.reducers.max=<number>
+INFO  : In order to set a constant number of reducers:
+INFO  :   set mapreduce.job.reduces=<number>
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:16
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_13@test_table3
+INFO  : POSTHOOK: Input: smb_mapjoin_13@test_table4
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+'a.key','a.value','b.key','b.value'
+No rows selected 
+>>>  !record
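In the second half of smb_mapjoin_13.q.out the MAPJOIN hint on test_table3/test_table4 falls back to a plain map join (Stage-3 builds a hash table in local work) rather than a sorted merge bucket join: the two tables are bucketed on (key, value) but sorted with different column priorities, and the plan also shows both join keys wrapped in UDFToDouble() because an INT key is compared with a STRING value, so the on-disk sort order cannot be exploited. For contrast, here is a minimal sketch of a pair that would qualify, mirroring the test_table1/test_table2 case earlier in the file (the table names here are illustrative, not part of the test):

  -- Both sides bucketed and sorted on the single join column, with matching types:
  CREATE TABLE t_a (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS;
  CREATE TABLE t_b (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS;
  SELECT /*+ MAPJOIN(b) */ * FROM t_a a JOIN t_b b ON a.key = b.key;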


[6/7] hive git commit: HIVE-16345: BeeLineDriver should be able to run qtest files which are using default database tables (Peter Vary via Yongzhi Chen)

Posted by yc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
new file mode 100644
index 0000000..70a37ca
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
@@ -0,0 +1,948 @@
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  
+>>>  create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_1
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_1
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_1
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_1
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_2
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_1
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_1
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_3
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_1
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_1.smb_bucket_1 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_1.smb_bucket_2 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_1@smb_bucket_3
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_1.smb_bucket_3 from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_1@smb_bucket_3
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+No rows affected 
+>>>  
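The three LOAD DATA statements above rely on the input files (smbbucket_1.rc and friends) already being bucketed and sorted as declared, because LOAD DATA only moves files into place and does not re-cluster or re-sort them; that is presumably why the file starts with set hive.strict.checks.bucketing=false, which otherwise rejects loads into bucketed tables. A hypothetical alternative (not part of the test) that lets Hive produce the bucket files itself would populate a table with an INSERT instead:

  -- Hypothetical sketch: some_source is a placeholder table, not part of the test
  INSERT OVERWRITE TABLE smb_bucket_1
  SELECT key, value FROM some_source;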
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  
+>>>  -- SORT_QUERY_RESULTS
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+No rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','NULL','NULL'
+'5','val_5','NULL','NULL'
+'10','val_10','NULL','NULL'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+'NULL','NULL','25','val_25'
+'NULL','NULL','30','val_30'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','NULL','NULL'
+'5','val_5','NULL','NULL'
+'10','val_10','NULL','NULL'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+'NULL','NULL','25','val_25'
+'NULL','NULL','30','val_30'
+9 rows selected 
+>>>  
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: key is not null (type: boolean)'
+'              Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 key (type: int)'
+'                  1 key (type: int)'
+'                outputColumnNames: _col0, _col1, _col5, _col6'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+No rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Left Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','NULL','NULL'
+'5','val_5','NULL','NULL'
+'10','val_10','NULL','NULL'
+5 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Right Outer Join0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+'NULL','NULL','25','val_25'
+'NULL','NULL','30','val_30'
+4 rows selected 
+>>>  
+>>>  explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: a'
+'            Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE'
+'            Sorted Merge Bucket Map Join Operator'
+'              condition map:'
+'                   Outer Join 0 to 1'
+'              keys:'
+'                0 key (type: int)'
+'                1 key (type: int)'
+'              outputColumnNames: _col0, _col1, _col5, _col6'
+'              Select Operator'
+'                expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3'
+'                File Output Operator'
+'                  compressed: false'
+'                  table:'
+'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+34 rows selected 
+>>>  select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key;
+INFO  : Compiling commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:a.key, type:int, comment:null), FieldSchema(name:a.value, type:string, comment:null), FieldSchema(name:b.key, type:int, comment:null), FieldSchema(name:b.value, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : PREHOOK: type: QUERY
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : PREHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : PREHOOK: Output: file:/!!ELIDED!!
+WARN  : Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+INFO  : Query ID = !!{queryId}!!
+INFO  : Total jobs = 1
+INFO  : Launching Job 1 out of 1
+INFO  : Starting task [Stage-1:MAPRED] in serial mode
+INFO  : Number of reduce tasks is set to 0 since there's no reduce operator
+DEBUG : Configuring job !!{jobId}}!! with file:/!!ELIDED!! as the submit dir
+DEBUG : adding the following namenodes' delegation tokens:[file:///]
+DEBUG : Creating splits at file:/!!ELIDED!!
+INFO  : number of splits:1
+INFO  : Submitting tokens for job: !!{jobId}}!!
+INFO  : The url to track the job: http://localhost:8080/
+INFO  : Job running in-process (local Hadoop)
+INFO  : Ended Job = !!{jobId}!!
+INFO  : POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+INFO  : POSTHOOK: type: QUERY
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_1
+INFO  : POSTHOOK: Input: smb_mapjoin_1@smb_bucket_2
+INFO  : POSTHOOK: Output: file:/!!ELIDED!!
+INFO  : MapReduce Jobs Launched: 
+INFO  : Stage-Stage-1:  HDFS Read: 0 HDFS Write: 0 SUCCESS
+INFO  : Total MapReduce CPU Time Spent: 0 msec
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key
+'a.key','a.value','b.key','b.value'
+'1','val_1','NULL','NULL'
+'3','val_3','NULL','NULL'
+'4','val_4','NULL','NULL'
+'5','val_5','NULL','NULL'
+'10','val_10','NULL','NULL'
+'NULL','NULL','20','val_20'
+'NULL','NULL','23','val_23'
+'NULL','NULL','25','val_25'
+'NULL','NULL','30','val_30'
+9 rows selected 
+>>>  
+>>>   
+>>>  
+>>>  
+>>>  
+>>>  !record
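
The queries recorded above exercise Hive's sort-merge bucket (SMB) map join, which requires both join inputs to be bucketed and sorted on the join key and the four settings listed near the top of the file. A minimal sketch of such a setup follows; the table names and bucket count are illustrative placeholders, not taken from the test itself:

  set hive.cbo.enable=false;
  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

  -- Both sides bucketed and sorted on the join key with the same bucket count,
  -- so corresponding buckets can be merged map-side without a reduce phase.
  CREATE TABLE smb_demo_a (key int, value string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
  CREATE TABLE smb_demo_b (key int, value string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;

  -- In the plans recorded above, the alias NOT named in the mapjoin hint is the
  -- one the mappers scan (it appears as the TableScan alias), while the hinted
  -- table is read by the Sorted Merge Bucket Map Join Operator for the merge.
  select /*+mapjoin(a)*/ * from smb_demo_a a join smb_demo_b b on a.key = b.key;
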

http://git-wip-us.apache.org/repos/asf/hive/blob/a2ce7f3d/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
new file mode 100644
index 0000000..bdfaefb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_10.q.out
@@ -0,0 +1,248 @@
+>>>  set hive.strict.checks.bucketing=false;
+No rows affected 
+>>>  
+>>>  
+>>>  create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE; 
+INFO  : Compiling commandqueryId=(!!{queryId}!!): create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: query: create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
+INFO  : PREHOOK: type: CREATETABLE
+INFO  : PREHOOK: Output: database:smb_mapjoin_10
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
+INFO  : POSTHOOK: type: CREATETABLE
+INFO  : POSTHOOK: Output: database:smb_mapjoin_10
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query create table tmp_smb_bucket_10(userid int, pageid int, postid int, type string) partitioned by (ds string) CLUSTERED BY (userid) SORTED BY (pageid, postid, type, userid) INTO 2 BUCKETS STORED AS RCFILE
+No rows affected 
+>>>  
+>>>  alter table tmp_smb_bucket_10 add partition (ds = '1');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): alter table tmp_smb_bucket_10 add partition (ds = '1')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): alter table tmp_smb_bucket_10 add partition (ds = '1')
+INFO  : PREHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '1')
+INFO  : PREHOOK: type: ALTERTABLE_ADDPARTS
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '1')
+INFO  : POSTHOOK: type: ALTERTABLE_ADDPARTS
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query alter table tmp_smb_bucket_10 add partition (ds = '1')
+No rows affected 
+>>>  alter table tmp_smb_bucket_10 add partition (ds = '2');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): alter table tmp_smb_bucket_10 add partition (ds = '2')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): alter table tmp_smb_bucket_10 add partition (ds = '2')
+INFO  : PREHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2')
+INFO  : PREHOOK: type: ALTERTABLE_ADDPARTS
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : Starting task [Stage-0:DDL] in serial mode
+INFO  : POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2')
+INFO  : POSTHOOK: type: ALTERTABLE_ADDPARTS
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query alter table tmp_smb_bucket_10 add partition (ds = '2')
+No rows affected 
+>>>  
+>>>  -- add dummy files to make sure that the number of files in each partition is same as number of buckets
+>>>   
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_10.tmp_smb_bucket_10 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_10.tmp_smb_bucket_10 partition (ds=1) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=1
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1')
+No rows affected 
+>>>  
+>>>  load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_10.tmp_smb_bucket_10 partition (ds=2) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+No rows affected 
+>>>  load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
+INFO  : Compiling commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:null, properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : PREHOOK: type: LOAD
+INFO  : PREHOOK: Input: file:/!!ELIDED!!
+INFO  : PREHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
+INFO  : Starting task [Stage-0:MOVE] in serial mode
+INFO  : Loading data to table smb_mapjoin_10.tmp_smb_bucket_10 partition (ds=2) from file:/!!ELIDED!!
+INFO  : Starting task [Stage-1:STATS] in serial mode
+INFO  : POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+INFO  : POSTHOOK: type: LOAD
+INFO  : POSTHOOK: Input: file:/!!ELIDED!!
+INFO  : POSTHOOK: Output: smb_mapjoin_10@tmp_smb_bucket_10@ds=2
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2')
+No rows affected 
+>>>  set hive.cbo.enable=false;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin = true;
+No rows affected 
+>>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
+No rows affected 
+>>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+No rows affected 
+>>>  
+>>>  explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type);
+INFO  : Compiling commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type)
+INFO  : Semantic Analysis Completed
+INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:Explain, type:string, comment:null)], properties:null)
+INFO  : Completed compiling commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : Executing commandqueryId=(!!{queryId}!!): explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type)
+INFO  : PREHOOK: query: explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type)
+INFO  : PREHOOK: type: QUERY
+INFO  : Starting task [Stage-3:EXPLAIN] in serial mode
+INFO  : POSTHOOK: query: explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type)
+INFO  : POSTHOOK: type: QUERY
+INFO  : Completed executing commandqueryId=(!!{queryId}!!); Time taken: !!ELIDED!! seconds
+INFO  : OK
+DEBUG : Shutting down query explain
+select /*+mapjoin(a)*/ * from tmp_smb_bucket_10 a join tmp_smb_bucket_10 b 
+on (a.ds = '1' and b.ds = '2' and
+    a.userid = b.userid and
+    a.pageid = b.pageid and
+    a.postid = b.postid and
+    a.type = b.type)
+'Explain'
+'STAGE DEPENDENCIES:'
+'  Stage-1 is a root stage'
+'  Stage-0 depends on stages: Stage-1'
+''
+'STAGE PLANS:'
+'  Stage: Stage-1'
+'    Map Reduce'
+'      Map Operator Tree:'
+'          TableScan'
+'            alias: b'
+'            Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE'
+'            Filter Operator'
+'              predicate: (userid is not null and pageid is not null and postid is not null and type is not null) (type: boolean)'
+'              Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE'
+'              Sorted Merge Bucket Map Join Operator'
+'                condition map:'
+'                     Inner Join 0 to 1'
+'                keys:'
+'                  0 userid (type: int), pageid (type: int), postid (type: int), type (type: string)'
+'                  1 userid (type: int), pageid (type: int), postid (type: int), type (type: string)'
+'                outputColumnNames: _col0, _col1, _col2, _col3, _col8, _col9, _col10, _col11'
+'                Select Operator'
+'                  expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string), '1' (type: string), _col8 (type: int), _col9 (type: int), _col10 (type: int), _col11 (type: string), '2' (type: string)'
+'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9'
+'                  File Output Operator'
+'                    compressed: false'
+'                    table:'
+'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
+'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
+'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+''
+'  Stage: Stage-0'
+'    Fetch Operator'
+'      limit: -1'
+'      Processor Tree:'
+'        ListSink'
+''
+37 rows selected 
+>>>  
+>>>  !record
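
The partitioned variant above joins two partitions of the same table on userid, pageid, postid and type (the bucketing column plus the sort columns), so the plan again produces a Sorted Merge Bucket Map Join Operator. A hypothetical check, not part of this test, that shows the per-partition metadata such a plan depends on:

  -- The Storage Information section lists the bucket count and sort columns
  -- for the partition.
  DESCRIBE FORMATTED tmp_smb_bucket_10 PARTITION (ds='1');
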